pax_global_header00006660000000000000000000000064145517015070014517gustar00rootroot0000000000000052 comment=c8e32775df20f73d473c0694b09c727d1f0dfe07 patroni-3.2.2/000077500000000000000000000000001455170150700131775ustar00rootroot00000000000000patroni-3.2.2/.github/000077500000000000000000000000001455170150700145375ustar00rootroot00000000000000patroni-3.2.2/.github/ISSUE_TEMPLATE/000077500000000000000000000000001455170150700167225ustar00rootroot00000000000000patroni-3.2.2/.github/ISSUE_TEMPLATE/bug_report.yml000066400000000000000000000052521455170150700216210ustar00rootroot00000000000000name: Bug Report description: Create a report to help us improve labels: - bug body: - type: markdown attributes: value: | If you have a question please post it on channel [#patroni](https://postgresteam.slack.com/archives/C9XPYG92A) in the [PostgreSQL Slack](https://pgtreats.info/slack-invite). Before reporting a bug please make sure to **reproduce it with the latest Patroni version**! Please fill the form below and provide as much information as possible. Not doing so may result in your bug not being addressed in a timely manner. - type: textarea id: problem attributes: label: What happened? validations: required: true - type: textarea id: repro attributes: label: How can we reproduce it (as minimally and precisely as possible)? validations: required: true - type: textarea id: expected attributes: label: What did you expect to happen? validations: required: true - type: textarea id: environment attributes: label: Patroni/PostgreSQL/DCS version value: | - Patroni version: - PostgreSQL version: - DCS (and its version): validations: required: true - type: textarea id: patroniConfig attributes: label: Patroni configuration file description: Please copy and paste Patroni configuration file here. This will be automatically formatted into code, so no need for backticks. 
render: yaml validations: required: true - type: textarea id: globalConfig attributes: label: patronictl show-config description: Please copy and paste `patronictl show-config` output here. This will be automatically formatted into code, so no need for backticks. render: yaml validations: required: true - type: textarea id: patroniLogs attributes: label: Patroni log files description: Please copy and paste any relevant Patroni log output. This will be automatically formatted into code, so no need for backticks. render: shell validations: required: true - type: textarea id: postgresLogs attributes: label: PostgreSQL log files description: Please copy and paste any relevant PostgreSQL log output. This will be automatically formatted into code, so no need for backticks. render: shell validations: required: true - type: checkboxes id: issueSearch attributes: label: Have you tried to use GitHub issue search? description: Maybe there is already a similar issue solved. options: - label: 'Yes' required: true validations: required: true - type: textarea id: additional attributes: label: Anything else we need to know? description: Add any other context about the problem here. 
patroni-3.2.2/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000002621455170150700207120ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: Question url: https://pgtreats.info/slack-invite about: "Please ask questions on channel #patroni in the PostgreSQL Slack" patroni-3.2.2/.github/workflows/000077500000000000000000000000001455170150700165745ustar00rootroot00000000000000patroni-3.2.2/.github/workflows/install_deps.py000066400000000000000000000112751455170150700216350ustar00rootroot00000000000000import inspect import os import shutil import subprocess import stat import sys import tarfile import zipfile def install_requirements(what): old_path = sys.path[:] w = os.path.join(os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe()))) sys.path.insert(0, os.path.dirname(os.path.dirname(w))) try: from setup import EXTRAS_REQUIRE, read finally: sys.path = old_path requirements = ['mock>=2.0.0', 'flake8', 'pytest', 'pytest-cov'] if what == 'all' else ['behave'] requirements += ['coverage'] # try to split tests between psycopg2 and psycopg3 requirements += ['psycopg[binary]'] if sys.version_info >= (3, 8, 0) and\ (sys.platform != 'darwin' or what == 'etcd3') else ['psycopg2-binary'] for r in read('requirements.txt').split('\n'): r = r.strip() if r != '': extras = {e for e, v in EXTRAS_REQUIRE.items() if v and any(r.startswith(x) for x in v)} if not extras or what == 'all' or what in extras: requirements.append(r) subprocess.call([sys.executable, '-m', 'pip', 'install', '--upgrade', 'pip']) subprocess.call([sys.executable, '-m', 'pip', 'install', '--upgrade', 'wheel']) r = subprocess.call([sys.executable, '-m', 'pip', 'install'] + requirements) s = subprocess.call([sys.executable, '-m', 'pip', 'install', '--upgrade', 'setuptools']) return s | r def install_packages(what): from mapping import versions packages = { 'zookeeper': ['zookeeper', 'zookeeper-bin', 'zookeeperd'], 'consul': ['consul'], } packages['exhibitor'] = 
packages['zookeeper'] packages = packages.get(what, []) ver = versions.get(what) if float(ver) >= 15: packages += ['postgresql-{0}-citus-12.1'.format(ver)] subprocess.call(['sudo', 'apt-get', 'update', '-y']) return subprocess.call(['sudo', 'apt-get', 'install', '-y', 'postgresql-' + ver, 'expect-dev'] + packages) def get_file(url, name): try: from urllib.request import urlretrieve except ImportError: from urllib import urlretrieve print('Downloading ' + url) urlretrieve(url, name) def untar(archive, name): with tarfile.open(archive) as tar: f = tar.extractfile(name) dest = os.path.basename(name) with open(dest, 'wb') as d: shutil.copyfileobj(f, d) return dest def unzip(archive, name): with zipfile.ZipFile(archive, 'r') as z: name = z.extract(name) dest = os.path.basename(name) shutil.move(name, dest) return dest def unzip_all(archive): print('Extracting ' + archive) with zipfile.ZipFile(archive, 'r') as z: z.extractall() def chmod_755(name): os.chmod(name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) def unpack(archive, name): print('Extracting {0} from {1}'.format(name, archive)) func = unzip if archive.endswith('.zip') else untar name = func(archive, name) chmod_755(name) return name def install_etcd(): version = os.environ.get('ETCDVERSION', '3.4.23') platform = {'linux2': 'linux', 'win32': 'windows', 'cygwin': 'windows'}.get(sys.platform, sys.platform) dirname = 'etcd-v{0}-{1}-amd64'.format(version, platform) ext = 'tar.gz' if platform == 'linux' else 'zip' name = '{0}.{1}'.format(dirname, ext) url = 'https://github.com/etcd-io/etcd/releases/download/v{0}/{1}'.format(version, name) get_file(url, name) ext = '.exe' if platform == 'windows' else '' return int(unpack(name, '{0}/etcd{1}'.format(dirname, ext)) is None) def install_postgres(): version = os.environ.get('PGVERSION', '15.1-1') platform = {'darwin': 'osx', 'win32': 'windows-x64', 'cygwin': 'windows-x64'}[sys.platform] if platform == 'osx': 
return subprocess.call(['brew', 'install', 'expect', 'postgresql@{0}'.format(version.split('.')[0])]) name = 'postgresql-{0}-{1}-binaries.zip'.format(version, platform) get_file('http://get.enterprisedb.com/postgresql/' + name, name) unzip_all(name) bin_dir = os.path.join('pgsql', 'bin') for f in os.listdir(bin_dir): chmod_755(os.path.join(bin_dir, f)) return subprocess.call(['pgsql/bin/postgres', '-V']) def main(): what = os.environ.get('DCS', sys.argv[1] if len(sys.argv) > 1 else 'all') if what != 'all': if sys.platform.startswith('linux'): r = install_packages(what) else: r = install_postgres() if r == 0 and what.startswith('etcd'): r = install_etcd() if r != 0: return r return install_requirements(what) if __name__ == '__main__': sys.exit(main()) patroni-3.2.2/.github/workflows/mapping.py000066400000000000000000000001571455170150700206040ustar00rootroot00000000000000versions = {'etcd': '9.6', 'etcd3': '16', 'consul': '13', 'exhibitor': '12', 'raft': '11', 'kubernetes': '15'} patroni-3.2.2/.github/workflows/release.yaml000066400000000000000000000022741455170150700211050ustar00rootroot00000000000000name: Publish Patroni distributions to PyPI and TestPyPI on: push: tags: - 'v[0-9]+.[0-9]+.[0-9]+' release: types: - published jobs: build-n-publish: name: Build and publish Patroni distributions to PyPI and TestPyPI runs-on: ubuntu-latest steps: - uses: actions/checkout@master - name: Set up Python 3.9 uses: actions/setup-python@v4 with: python-version: 3.9 - name: Install dependencies run: python .github/workflows/install_deps.py - name: Run tests and flake8 run: python .github/workflows/run_tests.py - name: Install Python packaging build frontend run: python -m pip install build - name: Build a binary wheel and a source tarball run: python -m build - name: Publish distribution to Test PyPI if: github.event_name == 'push' uses: pypa/gh-action-pypi-publish@v1.5.1 with: password: ${{ secrets.TEST_PYPI_API_TOKEN }} repository_url: https://test.pypi.org/legacy/ - name: 
Publish distribution to PyPI if: github.event_name == 'release' uses: pypa/gh-action-pypi-publish@v1.5.1 with: password: ${{ secrets.PYPI_API_TOKEN }} patroni-3.2.2/.github/workflows/run_tests.py000066400000000000000000000032201455170150700211710ustar00rootroot00000000000000import os import shutil import subprocess import sys import tempfile def main(): what = os.environ.get('DCS', sys.argv[1] if len(sys.argv) > 1 else 'all') if what == 'all': flake8 = subprocess.call([sys.executable, 'setup.py', 'flake8']) test = subprocess.call([sys.executable, 'setup.py', 'test']) version = '.'.join(map(str, sys.version_info[:2])) shutil.move('.coverage', os.path.join(tempfile.gettempdir(), '.coverage.' + version)) return flake8 | test elif what == 'combine': tmp = tempfile.gettempdir() for name in os.listdir(tmp): if name.startswith('.coverage.'): shutil.move(os.path.join(tmp, name), name) return subprocess.call([sys.executable, '-m', 'coverage', 'combine']) env = os.environ.copy() if sys.platform.startswith('linux'): from mapping import versions version = versions.get(what) path = '/usr/lib/postgresql/{0}/bin:.'.format(version) unbuffer = ['timeout', '900', 'unbuffer'] else: if sys.platform == 'darwin': version = os.environ.get('PGVERSION', '15.1-1') path = '/usr/local/opt/postgresql@{0}/bin:.'.format(version.split('.')[0]) unbuffer = ['unbuffer'] else: path = os.path.abspath(os.path.join('pgsql', 'bin')) unbuffer = [] env['PATH'] = path + os.pathsep + env['PATH'] env['DCS'] = what if what == 'kubernetes': env['PATRONI_KUBERNETES_CONTEXT'] = 'k3d-k3s-default' return subprocess.call(unbuffer + [sys.executable, '-m', 'behave'], env=env) if __name__ == '__main__': sys.exit(main()) patroni-3.2.2/.github/workflows/tests.yaml000066400000000000000000000137051455170150700206300ustar00rootroot00000000000000name: Tests on: pull_request: push: branches: - master - 'REL_[0-9]+_[0-9]+' env: CODACY_PROJECT_TOKEN: ${{ secrets.CODACY_PROJECT_TOKEN }} SECRETS_AVAILABLE: ${{ 
secrets.CODACY_PROJECT_TOKEN != '' }} jobs: unit: runs-on: ${{ matrix.os }}-latest strategy: fail-fast: false matrix: os: [ubuntu, windows, macos] steps: - uses: actions/checkout@v3 - name: Set up Python 3.7 uses: actions/setup-python@v4 with: python-version: 3.7 - name: Install dependencies run: python .github/workflows/install_deps.py - name: Run tests and flake8 run: python .github/workflows/run_tests.py - name: Set up Python 3.8 uses: actions/setup-python@v4 with: python-version: 3.8 - name: Install dependencies run: python .github/workflows/install_deps.py - name: Run tests and flake8 run: python .github/workflows/run_tests.py - name: Set up Python 3.9 uses: actions/setup-python@v4 with: python-version: 3.9 - name: Install dependencies run: python .github/workflows/install_deps.py - name: Run tests and flake8 run: python .github/workflows/run_tests.py - name: Set up Python 3.10 uses: actions/setup-python@v4 with: python-version: '3.10' - name: Install dependencies run: python .github/workflows/install_deps.py - name: Run tests and flake8 run: python .github/workflows/run_tests.py - name: Set up Python 3.11 uses: actions/setup-python@v4 with: python-version: 3.11 - name: Install dependencies run: python .github/workflows/install_deps.py - name: Run tests and flake8 run: python .github/workflows/run_tests.py - name: Combine coverage run: python .github/workflows/run_tests.py combine - name: Install coveralls run: python -m pip install coveralls - name: Upload Coverage env: COVERALLS_FLAG_NAME: unit-${{ matrix.os }} COVERALLS_PARALLEL: 'true' GITHUB_TOKEN: ${{ secrets.github_token }} run: python -m coveralls --service=github behave: runs-on: ${{ matrix.os }}-latest env: DCS: ${{ matrix.dcs }} ETCDVERSION: 3.4.23 PGVERSION: 15.1-1 # for windows and macos strategy: fail-fast: false matrix: os: [ubuntu] python-version: [3.7, '3.10'] dcs: [etcd, etcd3, consul, exhibitor, kubernetes, raft] include: - os: macos python-version: 3.8 dcs: raft - os: macos python-version: 
3.9 dcs: etcd - os: macos python-version: 3.11 dcs: etcd3 steps: - uses: actions/checkout@v3 - name: Set up Python uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - uses: nolar/setup-k3d-k3s@v1 if: matrix.dcs == 'kubernetes' - name: Add postgresql and citus apt repo run: | sudo apt-get update -y sudo apt-get install -y wget ca-certificates gnupg debian-archive-keyring apt-transport-https sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' sudo sh -c 'wget -qO - https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor > /etc/apt/trusted.gpg.d/apt.postgresql.org.gpg' sudo sh -c 'echo "deb [signed-by=/etc/apt/trusted.gpg.d/citusdata_community.gpg] https://packagecloud.io/citusdata/community/ubuntu/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/citusdata_community.list' sudo sh -c 'wget -qO - https://packagecloud.io/citusdata/community/gpgkey | gpg --dearmor > /etc/apt/trusted.gpg.d/citusdata_community.gpg' if: matrix.os == 'ubuntu' - name: Install dependencies run: python .github/workflows/install_deps.py - name: Run behave tests run: python .github/workflows/run_tests.py - name: Upload logs if behave failed uses: actions/upload-artifact@v3 if: failure() with: name: behave-${{ matrix.os }}-${{ matrix.dcs }}-${{ matrix.python-version }}-logs path: | features/output/*_failed/*postgres?.* features/output/*.log if-no-files-found: error retention-days: 5 - name: Generate coverage xml report run: python -m coverage xml -o cobertura.xml - name: Upload coverage to Codacy run: bash <(curl -Ls https://coverage.codacy.com/get.sh) report -r cobertura.xml -l Python --partial if: ${{ env.SECRETS_AVAILABLE == 'true' }} coveralls-finish: name: Finalize coveralls.io needs: unit runs-on: ubuntu-latest steps: - uses: actions/setup-python@v4 - run: python -m pip install coveralls - run: python -m coveralls --service=github --finish env: GITHUB_TOKEN: ${{ 
secrets.github_token }} codacy-final: name: Finalize Codacy needs: behave runs-on: ubuntu-latest steps: - run: bash <(curl -Ls https://coverage.codacy.com/get.sh) final if: ${{ env.SECRETS_AVAILABLE == 'true' }} pyright: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Set up Python 3.11 uses: actions/setup-python@v4 with: python-version: 3.11 - name: Install dependencies run: python -m pip install -r requirements.txt psycopg2-binary psycopg - uses: jakebailey/pyright-action@v1 with: version: 1.1.347 docs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Set up Python 3.11 uses: actions/setup-python@v4 with: python-version: 3.11 cache: pip - name: Install dependencies run: pip install tox - name: Install package dependencies run: | sudo apt update \ && sudo apt install -y \ latexmk texlive-latex-extra tex-gyre \ --no-install-recommends - name: Generate documentation run: tox -m docs patroni-3.2.2/.gitignore000066400000000000000000000012541455170150700151710ustar00rootroot00000000000000*.py[cod] # vi(m) swap files: *.sw? 
# C extensions *.so # Packages .cache/ *.egg *.eggs *.egg-info dist build eggs parts bin var sdist develop-eggs .installed.cfg lib lib64 # Installer logs pip-log.txt # Unit test / coverage reports .coverage .tox nosetests.xml coverage.xml htmlcov junit.xml features/output* dummy # Translations *.mo # Mr Developer .mr.developer.cfg .project .pydevproject pgpass scm-source.json # Sphinx-generated documentation docs/_build/ docs/build/ docs/source/_static/ docs/source/_templates/ docs/modules/ docs/pdf/ # Pycharm IDE .idea/ #VSCode IDE .vscode/ # Virtual environment venv*/ # Default test data directory data/ # macOS **/.DS_Store patroni-3.2.2/.readthedocs.yaml000066400000000000000000000007661455170150700164370ustar00rootroot00000000000000# .readthedocs.yaml # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 # Set the version of Python and other tools you might need build: os: ubuntu-22.04 tools: python: "3.11" # Build documentation in the docs/ directory with Sphinx sphinx: configuration: docs/conf.py formats: - epub - pdf - htmlzip python: install: - requirements: requirements.docs.txt - requirements: requirements.txt patroni-3.2.2/CODEOWNERS000066400000000000000000000000601455170150700145660ustar00rootroot00000000000000# global owners * @CyberDem0n @hughcapet patroni-3.2.2/Dockerfile000066400000000000000000000177461455170150700152100ustar00rootroot00000000000000## This Dockerfile is meant to aid in the building and debugging patroni whilst developing on your local machine ## It has all the necessary components to play/debug with a single node appliance, running etcd ARG PG_MAJOR=15 ARG COMPRESS=false ARG PGHOME=/home/postgres ARG PGDATA=$PGHOME/data ARG LC_ALL=C.UTF-8 ARG LANG=C.UTF-8 FROM postgres:$PG_MAJOR as builder ARG PGHOME ARG PGDATA ARG LC_ALL ARG LANG ENV ETCDVERSION=3.3.13 CONFDVERSION=0.16.0 RUN set -ex \ && export DEBIAN_FRONTEND=noninteractive \ && echo 
'APT::Install-Recommends "0";\nAPT::Install-Suggests "0";' > /etc/apt/apt.conf.d/01norecommend \ && apt-get update -y \ # postgres:10 is based on debian, which has the patroni package. We will install all required dependencies && apt-cache depends patroni | sed -n -e 's/.*Depends: \(python3-.\+\)$/\1/p' \ | grep -Ev '^python3-(sphinx|etcd|consul|kazoo|kubernetes)' \ | xargs apt-get install -y vim curl less jq locales haproxy sudo \ python3-etcd python3-kazoo python3-pip busybox \ net-tools iputils-ping dumb-init --fix-missing \ \ # Cleanup all locales but en_US.UTF-8 && find /usr/share/i18n/charmaps/ -type f ! -name UTF-8.gz -delete \ && find /usr/share/i18n/locales/ -type f ! -name en_US ! -name en_GB ! -name i18n* ! -name iso14651_t1 ! -name iso14651_t1_common ! -name 'translit_*' -delete \ && echo 'en_US.UTF-8 UTF-8' > /usr/share/i18n/SUPPORTED \ \ # Make sure we have a en_US.UTF-8 locale available && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ \ # haproxy dummy config && echo 'global\n stats socket /run/haproxy/admin.sock mode 660 level admin' > /etc/haproxy/haproxy.cfg \ \ # vim config && echo 'syntax on\nfiletype plugin indent on\nset mouse-=a\nautocmd FileType yaml setlocal ts=2 sts=2 sw=2 expandtab' > /etc/vim/vimrc.local \ \ # Prepare postgres/patroni/haproxy environment && mkdir -p "$PGHOME/.config/patroni" /patroni /run/haproxy \ && ln -s ../../postgres0.yml "$PGHOME/.config/patroni/patronictl.yaml" \ && ln -s /patronictl.py /usr/local/bin/patronictl \ && sed -i "s|/var/lib/postgresql.*|$PGHOME:/bin/bash|" /etc/passwd \ && chown -R postgres:postgres /var/log \ \ # Download etcd && curl -sL "https://github.com/coreos/etcd/releases/download/v$ETCDVERSION/etcd-v$ETCDVERSION-linux-$(dpkg --print-architecture).tar.gz" \ | tar xz -C /usr/local/bin --strip=1 --wildcards --no-anchored etcd etcdctl \ \ && if [ $(dpkg --print-architecture) = 'arm64' ]; then \ # Build confd apt-get install -y git make \ && curl -sL 
https://go.dev/dl/go1.20.4.linux-arm64.tar.gz | tar xz -C /usr/local go \ && export GOROOT=/usr/local/go && export PATH=$PATH:$GOROOT/bin \ && git clone --recurse-submodules https://github.com/kelseyhightower/confd.git \ && make -C confd \ && cp confd/bin/confd /usr/local/bin/confd \ && rm -rf /confd /usr/local/go; \ else \ # Download confd curl -sL "https://github.com/kelseyhightower/confd/releases/download/v$CONFDVERSION/confd-$CONFDVERSION-linux-$(dpkg --print-architecture)" \ > /usr/local/bin/confd && chmod +x /usr/local/bin/confd; \ fi \ \ # Clean up all useless packages and some files && apt-get purge -y --allow-remove-essential python3-pip gzip bzip2 util-linux e2fsprogs \ libmagic1 bsdmainutils login ncurses-bin libmagic-mgc e2fslibs bsdutils \ exim4-config gnupg-agent dirmngr \ git make \ && apt-get autoremove -y \ && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* \ /root/.cache \ /var/cache/debconf/* \ /etc/rc?.d \ /etc/systemd \ /docker-entrypoint* \ /sbin/pam* \ /sbin/swap* \ /sbin/unix* \ /usr/local/bin/gosu \ /usr/sbin/[acgipr]* \ /usr/sbin/*user* \ /usr/share/doc* \ /usr/share/man \ /usr/share/info \ /usr/share/i18n/locales/translit_hangul \ /usr/share/locale/?? \ /usr/share/locale/??_?? \ /usr/share/postgresql/*/man \ /usr/share/postgresql-common/pg_wrapper \ /usr/share/vim/vim80/doc \ /usr/share/vim/vim80/lang \ /usr/share/vim/vim80/tutor \ # /var/lib/dpkg/info/* \ && find /usr/bin -xtype l -delete \ && find /var/log -type f -exec truncate --size 0 {} \; \ && find /usr/lib/python3/dist-packages -name '*test*' | xargs rm -fr \ && find /lib/$(uname -m)-linux-gnu/security -type f ! -name pam_env.so ! -name pam_permit.so ! 
-name pam_unix.so -delete # perform compression if it is necessary ARG COMPRESS RUN if [ "$COMPRESS" = "true" ]; then \ set -ex \ # Allow certain sudo commands from postgres && echo 'postgres ALL=(ALL) NOPASSWD: /bin/tar xpJf /a.tar.xz -C /, /bin/rm /a.tar.xz, /bin/ln -snf dash /bin/sh' >> /etc/sudoers \ && ln -snf busybox /bin/sh \ && arch=$(uname -m) \ && darch=$(uname -m | sed 's/_/-/') \ && files="/bin/sh /usr/bin/sudo /usr/lib/sudo/sudoers.so /lib/$arch-linux-gnu/security/pam_*.so" \ && libs="$(ldd $files | awk '{print $3;}' | grep '^/' | sort -u) /lib/ld-linux-$darch.so.* /lib/$arch-linux-gnu/ld-linux-$darch.so.* /lib/$arch-linux-gnu/libnsl.so.* /lib/$arch-linux-gnu/libnss_compat.so.* /lib/$arch-linux-gnu/libnss_files.so.*" \ && (echo /var/run $files $libs | tr ' ' '\n' && realpath $files $libs) | sort -u | sed 's/^\///' > /exclude \ && find /etc/alternatives -xtype l -delete \ && save_dirs="usr lib var bin sbin etc/ssl etc/init.d etc/alternatives etc/apt" \ && XZ_OPT=-e9v tar -X /exclude -cpJf a.tar.xz $save_dirs \ # we call "cat /exclude" to avoid including files from the $save_dirs that are also among # the exceptions listed in the /exclude, as "uniq -u" eliminates all non-unique lines. # By calling "cat /exclude" a second time we guarantee that there will be at least two lines # for each exception and therefore they will be excluded from the output passed to 'rm'. 
&& /bin/busybox sh -c "(find $save_dirs -not -type d && cat /exclude /exclude && echo exclude) | sort | uniq -u | xargs /bin/busybox rm" \ && /bin/busybox --install -s \ && /bin/busybox sh -c "find $save_dirs -type d -depth -exec rmdir -p {} \; 2> /dev/null"; \ fi FROM scratch COPY --from=builder / / LABEL maintainer="Alexander Kukushkin " ARG PG_MAJOR ARG COMPRESS ARG PGHOME ARG PGDATA ARG LC_ALL ARG LANG ARG PGBIN=/usr/lib/postgresql/$PG_MAJOR/bin ENV LC_ALL=$LC_ALL LANG=$LANG EDITOR=/usr/bin/editor ENV PGDATA=$PGDATA PATH=$PATH:$PGBIN COPY patroni /patroni/ COPY extras/confd/conf.d/haproxy.toml /etc/confd/conf.d/ COPY extras/confd/templates/haproxy.tmpl /etc/confd/templates/ COPY patroni*.py docker/entrypoint.sh / COPY postgres?.yml $PGHOME/ WORKDIR $PGHOME RUN sed -i 's/env python/&3/' /patroni*.py \ # "fix" patroni configs && sed -i 's/^ listen: 127.0.0.1/ listen: 0.0.0.0/' postgres?.yml \ && sed -i "s|^\( data_dir: \).*|\1$PGDATA|" postgres?.yml \ && sed -i "s|^#\( bin_dir: \).*|\1$PGBIN|" postgres?.yml \ && sed -i 's/^ - encoding: UTF8/ - locale: en_US.UTF-8\n&/' postgres?.yml \ && sed -i 's/^\(scope\|name\|etcd\| host\| authentication\| connect_address\| parameters\):/#&/' postgres?.yml \ && sed -i 's/^ \(replication\|superuser\|rewind\|unix_socket_directories\|\(\( \)\{0,1\}\(username\|password\)\)\):/#&/' postgres?.yml \ && sed -i 's/^ parameters:/&\n max_connections: 100/' postgres?.yml \ && sed -i 's/^ pg_hba:/&\n - local all all trust/' postgres?.yml \ && sed -i 's/^\(.*\) \(.*\) md5/\1 all md5/' postgres?.yml \ && if [ "$COMPRESS" = "true" ]; then chmod u+s /usr/bin/sudo; fi \ && chmod +s /bin/ping \ && chown -R postgres:postgres "$PGHOME" /run /etc/haproxy USER postgres ENTRYPOINT ["/bin/sh", "/entrypoint.sh"] patroni-3.2.2/Dockerfile.citus000066400000000000000000000250561455170150700163270ustar00rootroot00000000000000## This Dockerfile is meant to aid in the building and debugging patroni whilst developing on your local machine ## It has all the 
necessary components to play/debug with a single node appliance, running etcd ARG PG_MAJOR=15 ARG COMPRESS=false ARG PGHOME=/home/postgres ARG PGDATA=$PGHOME/data ARG LC_ALL=C.UTF-8 ARG LANG=C.UTF-8 FROM postgres:$PG_MAJOR as builder ARG PGHOME ARG PGDATA ARG LC_ALL ARG LANG ENV ETCDVERSION=3.3.13 CONFDVERSION=0.16.0 RUN set -ex \ && export DEBIAN_FRONTEND=noninteractive \ && echo 'APT::Install-Recommends "0";\nAPT::Install-Suggests "0";' > /etc/apt/apt.conf.d/01norecommend \ && apt-get update -y \ # postgres:PG_MAJOR is based on debian, which has the patroni package. We will install all required dependencies && apt-cache depends patroni | sed -n -e 's/.*Depends: \(python3-.\+\)$/\1/p' \ | grep -Ev '^python3-(sphinx|etcd|consul|kazoo|kubernetes)' \ | xargs apt-get install -y vim curl less jq locales haproxy sudo \ python3-etcd python3-kazoo python3-pip busybox \ net-tools iputils-ping lsb-release dumb-init --fix-missing \ && if [ $(dpkg --print-architecture) = 'arm64' ]; then \ apt-get install -y postgresql-server-dev-$PG_MAJOR \ git gcc make autoconf \ libc6-dev flex libcurl4-gnutls-dev \ libicu-dev libkrb5-dev liblz4-dev \ libpam0g-dev libreadline-dev libselinux1-dev\ libssl-dev libxslt1-dev libzstd-dev uuid-dev \ && git clone -b "main" https://github.com/citusdata/citus.git \ && MAKEFLAGS="-j $(grep -c ^processor /proc/cpuinfo)" \ && cd citus && ./configure && make install && cd ../ && rm -rf /citus; \ else \ echo "deb [signed-by=/etc/apt/trusted.gpg.d/citusdata_community.gpg] https://packagecloud.io/citusdata/community/debian/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/citusdata_community.list \ && curl -sL https://packagecloud.io/citusdata/community/gpgkey | gpg --dearmor > /etc/apt/trusted.gpg.d/citusdata_community.gpg \ && apt-get update -y \ && apt-get -y install postgresql-$PG_MAJOR-citus-11.3; \ fi \ \ # Cleanup all locales but en_US.UTF-8 && find /usr/share/i18n/charmaps/ -type f ! 
-name UTF-8.gz -delete \ && find /usr/share/i18n/locales/ -type f ! -name en_US ! -name en_GB ! -name i18n* ! -name iso14651_t1 ! -name iso14651_t1_common ! -name 'translit_*' -delete \ && echo 'en_US.UTF-8 UTF-8' > /usr/share/i18n/SUPPORTED \ \ # Make sure we have a en_US.UTF-8 locale available && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ \ # haproxy dummy config && echo 'global\n stats socket /run/haproxy/admin.sock mode 660 level admin' > /etc/haproxy/haproxy.cfg \ \ # vim config && echo 'syntax on\nfiletype plugin indent on\nset mouse-=a\nautocmd FileType yaml setlocal ts=2 sts=2 sw=2 expandtab' > /etc/vim/vimrc.local \ \ # Prepare postgres/patroni/haproxy environment && mkdir -p $PGHOME/.config/patroni /patroni /run/haproxy \ && ln -s ../../postgres0.yml $PGHOME/.config/patroni/patronictl.yaml \ && ln -s /patronictl.py /usr/local/bin/patronictl \ && sed -i "s|/var/lib/postgresql.*|$PGHOME:/bin/bash|" /etc/passwd \ && chown -R postgres:postgres /var/log \ \ # Download etcd && curl -sL https://github.com/coreos/etcd/releases/download/v${ETCDVERSION}/etcd-v${ETCDVERSION}-linux-$(dpkg --print-architecture).tar.gz \ | tar xz -C /usr/local/bin --strip=1 --wildcards --no-anchored etcd etcdctl \ \ && if [ $(dpkg --print-architecture) = 'arm64' ]; then \ # Build confd curl -sL https://go.dev/dl/go1.20.4.linux-arm64.tar.gz | tar xz -C /usr/local go \ && export GOROOT=/usr/local/go && export PATH=$PATH:$GOROOT/bin \ && git clone --recurse-submodules https://github.com/kelseyhightower/confd.git \ && make -C confd \ && cp confd/bin/confd /usr/local/bin/confd \ && rm -rf /confd /usr/local/go; \ else \ # Download confd curl -sL "https://github.com/kelseyhightower/confd/releases/download/v$CONFDVERSION/confd-$CONFDVERSION-linux-$(dpkg --print-architecture)" \ > /usr/local/bin/confd && chmod +x /usr/local/bin/confd; \ fi \ # Prepare client cert for HAProxy && cat /etc/ssl/private/ssl-cert-snakeoil.key /etc/ssl/certs/ssl-cert-snakeoil.pem > 
/etc/ssl/private/ssl-cert-snakeoil.crt \ \ # Clean up all useless packages and some files && apt-get purge -y --allow-remove-essential python3-pip gzip bzip2 util-linux e2fsprogs \ libmagic1 bsdmainutils login ncurses-bin libmagic-mgc e2fslibs bsdutils \ exim4-config gnupg-agent dirmngr \ postgresql-server-dev-$PG_MAJOR git gcc make autoconf \ libc6-dev flex libicu-dev libkrb5-dev liblz4-dev \ libpam0g-dev libreadline-dev libselinux1-dev libssl-dev libxslt1-dev libzstd-dev uuid-dev \ && apt-get autoremove -y \ && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* \ /root/.cache \ /var/cache/debconf/* \ /etc/rc?.d \ /etc/systemd \ /docker-entrypoint* \ /sbin/pam* \ /sbin/swap* \ /sbin/unix* \ /usr/local/bin/gosu \ /usr/sbin/[acgipr]* \ /usr/sbin/*user* \ /usr/share/doc* \ /usr/share/man \ /usr/share/info \ /usr/share/i18n/locales/translit_hangul \ /usr/share/locale/?? \ /usr/share/locale/??_?? \ /usr/share/postgresql/*/man \ /usr/share/postgresql-common/pg_wrapper \ /usr/share/vim/vim80/doc \ /usr/share/vim/vim80/lang \ /usr/share/vim/vim80/tutor \ # /var/lib/dpkg/info/* \ && find /usr/bin -xtype l -delete \ && find /var/log -type f -exec truncate --size 0 {} \; \ && find /usr/lib/python3/dist-packages -name '*test*' | xargs rm -fr \ && find /lib/$(uname -m)-linux-gnu/security -type f ! -name pam_env.so ! -name pam_permit.so ! 
-name pam_unix.so -delete # perform compression if it is necessary ARG COMPRESS RUN if [ "$COMPRESS" = "true" ]; then \ set -ex \ # Allow certain sudo commands from postgres && echo 'postgres ALL=(ALL) NOPASSWD: /bin/tar xpJf /a.tar.xz -C /, /bin/rm /a.tar.xz, /bin/ln -snf dash /bin/sh' >> /etc/sudoers \ && ln -snf busybox /bin/sh \ && arch=$(uname -m) \ && darch=$(uname -m | sed 's/_/-/') \ && files="/bin/sh /usr/bin/sudo /usr/lib/sudo/sudoers.so /lib/$arch-linux-gnu/security/pam_*.so" \ && libs="$(ldd $files | awk '{print $3;}' | grep '^/' | sort -u) /lib/ld-linux-$darch.so.* /lib/$arch-linux-gnu/ld-linux-$darch.so.* /lib/$arch-linux-gnu/libnsl.so.* /lib/$arch-linux-gnu/libnss_compat.so.* /lib/$arch-linux-gnu/libnss_files.so.*" \ && (echo /var/run $files $libs | tr ' ' '\n' && realpath $files $libs) | sort -u | sed 's/^\///' > /exclude \ && find /etc/alternatives -xtype l -delete \ && save_dirs="usr lib var bin sbin etc/ssl etc/init.d etc/alternatives etc/apt" \ && XZ_OPT=-e9v tar -X /exclude -cpJf a.tar.xz $save_dirs \ # we call "cat /exclude" to avoid including files from the $save_dirs that are also among # the exceptions listed in the /exclude, as "uniq -u" eliminates all non-unique lines. # By calling "cat /exclude" a second time we guarantee that there will be at least two lines # for each exception and therefore they will be excluded from the output passed to 'rm'. 
&& /bin/busybox sh -c "(find $save_dirs -not -type d && cat /exclude /exclude && echo exclude) | sort | uniq -u | xargs /bin/busybox rm" \ && /bin/busybox --install -s \ && /bin/busybox sh -c "find $save_dirs -type d -depth -exec rmdir -p {} \; 2> /dev/null"; \ else \ /bin/busybox --install -s; \ fi FROM scratch COPY --from=builder / / LABEL maintainer="Alexander Kukushkin " ARG PG_MAJOR ARG COMPRESS ARG PGHOME ARG PGDATA ARG LC_ALL ARG LANG ARG PGBIN=/usr/lib/postgresql/$PG_MAJOR/bin ENV LC_ALL=$LC_ALL LANG=$LANG EDITOR=/usr/bin/editor ENV PGDATA=$PGDATA PATH=$PATH:$PGBIN COPY patroni /patroni/ COPY extras/confd/conf.d/haproxy.toml /etc/confd/conf.d/ COPY extras/confd/templates/haproxy-citus.tmpl /etc/confd/templates/haproxy.tmpl COPY patroni*.py docker/entrypoint.sh / COPY postgres?.yml $PGHOME/ WORKDIR $PGHOME RUN sed -i 's/env python/&3/' /patroni*.py \ # "fix" patroni configs && sed -i 's/^ listen: 127.0.0.1/ listen: 0.0.0.0/' postgres?.yml \ && sed -i "s|^\( data_dir: \).*|\1$PGDATA|" postgres?.yml \ && sed -i "s|^#\( bin_dir: \).*|\1$PGBIN|" postgres?.yml \ && sed -i 's/^ - encoding: UTF8/ - locale: en_US.UTF-8\n&/' postgres?.yml \ && sed -i 's/^scope:/log:\n loggers:\n patroni.postgresql.citus: DEBUG\n#&/' postgres?.yml \ && sed -i 's/^\(name\|etcd\| host\| authentication\| connect_address\| parameters\):/#&/' postgres?.yml \ && sed -i 's/^ \(replication\|superuser\|rewind\|unix_socket_directories\|\(\( \)\{0,1\}\(username\|password\)\)\):/#&/' postgres?.yml \ && sed -i 's/^postgresql:/&\n basebackup:\n checkpoint: fast/' postgres?.yml \ && sed -i 's|^ parameters:|&\n max_connections: 100\n shared_buffers: 16MB\n ssl: "on"\n ssl_ca_file: /etc/ssl/certs/ssl-cert-snakeoil.pem\n ssl_cert_file: /etc/ssl/certs/ssl-cert-snakeoil.pem\n ssl_key_file: /etc/ssl/private/ssl-cert-snakeoil.key\n citus.node_conninfo: "sslrootcert=/etc/ssl/certs/ssl-cert-snakeoil.pem sslkey=/etc/ssl/private/ssl-cert-snakeoil.key sslcert=/etc/ssl/certs/ssl-cert-snakeoil.pem 
sslmode=verify-ca"|' postgres?.yml \ && sed -i 's/^ pg_hba:/&\n - local all all trust/' postgres?.yml \ && sed -i 's/^\(.*\) \(.*\) \(.*\) \(.*\) \(.*\) md5.*$/\1 hostssl \3 \4 all md5 clientcert=verify-ca/' postgres?.yml \ && sed -i 's/^#\(ctl\| certfile\| keyfile\)/\1/' postgres?.yml \ && sed -i 's|^# cafile: .*$| verify_client: required\n cafile: /etc/ssl/certs/ssl-cert-snakeoil.pem|' postgres?.yml \ && sed -i 's|^# cacert: .*$| cacert: /etc/ssl/certs/ssl-cert-snakeoil.pem|' postgres?.yml \ && sed -i 's/^# insecure: .*/ insecure: on/' postgres?.yml \ # client cert for HAProxy to access Patroni REST API && if [ "$COMPRESS" = "true" ]; then chmod u+s /usr/bin/sudo; fi \ && chmod +s /bin/ping \ && chown -R postgres:postgres $PGHOME /run /etc/haproxy USER postgres ENTRYPOINT ["/bin/sh", "/entrypoint.sh"] patroni-3.2.2/LICENSE000066400000000000000000000020761455170150700142110ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2015 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
patroni-3.2.2/MAINTAINERS000066400000000000000000000001321455170150700146700ustar00rootroot00000000000000Alexander Kukushkin Polina Bungina patroni-3.2.2/MANIFEST.in000066400000000000000000000001031455170150700147270ustar00rootroot00000000000000include requirements* include *.rst recursive-include patroni *.py patroni-3.2.2/README.rst000066400000000000000000000207441455170150700146750ustar00rootroot00000000000000|Tests Status| |Coverage Status| Patroni: A Template for PostgreSQL HA with ZooKeeper, etcd or Consul -------------------------------------------------------------------- You can find a version of this documentation that is searchable and also easier to navigate at `patroni.readthedocs.io `__. There are many ways to run high availability with PostgreSQL; for a list, see the `PostgreSQL Documentation `__. Patroni is a template for high availability (HA) PostgreSQL solutions using Python. For maximum accessibility, Patroni supports a variety of distributed configuration stores like `ZooKeeper `__, `etcd `__, `Consul `__ or `Kubernetes `__. Database engineers, DBAs, DevOps engineers, and SREs who are looking to quickly deploy HA PostgreSQL in datacenters - or anywhere else - will hopefully find it useful. We call Patroni a "template" because it is far from being a one-size-fits-all or plug-and-play replication system. It will have its own caveats. Use wisely. Currently supported PostgreSQL versions: 9.3 to 16. **Note to Citus users**: Starting from 3.0 Patroni nicely integrates with the `Citus `__ database extension to Postgres. Please check the `Citus support page `__ in the Patroni documentation for more info about how to use Patroni high availability together with a Citus distributed cluster. **Note to Kubernetes users**: Patroni can run natively on top of Kubernetes. Take a look at the `Kubernetes `__ chapter of the Patroni documentation. .. 
contents:: :local: :depth: 1 :backlinks: none ================= How Patroni Works ================= Patroni originated as a fork of `Governor `__, the project from Compose. It includes plenty of new features. For an example of a Docker-based deployment with Patroni, see `Spilo `__, currently in use at Zalando. For additional background info, see: * `Elephants on Automatic: HA Clustered PostgreSQL with Helm `_, talk by Josh Berkus and Oleksii Kliukin at KubeCon Berlin 2017 * `PostgreSQL HA with Kubernetes and Patroni `__, talk by Josh Berkus at KubeCon 2016 (video) * `Feb. 2016 Zalando Tech blog post `__ ================== Development Status ================== Patroni is in active development and accepts contributions. See our `Contributing `__ section below for more details. We report new releases information `here `__. ========= Community ========= There are two places to connect with the Patroni community: `on github `__, via Issues and PRs, and on channel `#patroni `__ in the `PostgreSQL Slack `__. If you're using Patroni, or just interested, please join us. =================================== Technical Requirements/Installation =================================== **Pre-requirements for Mac OS** To install requirements on a Mac, run the following: :: brew install postgresql etcd haproxy libyaml python **Psycopg** Starting from `psycopg2-2.8 `__ the binary version of psycopg2 will no longer be installed by default. Installing it from the source code requires C compiler and postgres+python dev packages. Since in the python world it is not possible to specify dependency as ``psycopg2 OR psycopg2-binary`` you will have to decide how to install it. There are a few options available: 1. Use the package manager from your distro :: sudo apt-get install python3-psycopg2 # install psycopg2 module on Debian/Ubuntu sudo yum install python3-psycopg2 # install psycopg2 on RedHat/Fedora/CentOS 2. 
Specify one of `psycopg`, `psycopg2`, or `psycopg2-binary` in the list of dependencies when installing Patroni with pip (see below). **General installation for pip** Patroni can be installed with pip: :: pip install patroni[dependencies] where dependencies can be either empty, or consist of one or more of the following: etcd or etcd3 `python-etcd` module in order to use Etcd as DCS consul `python-consul` module in order to use Consul as DCS zookeeper `kazoo` module in order to use Zookeeper as DCS exhibitor `kazoo` module in order to use Exhibitor as DCS (same dependencies as for Zookeeper) kubernetes `kubernetes` module in order to use Kubernetes as DCS in Patroni raft `pysyncobj` module in order to use python Raft implementation as DCS aws `boto3` in order to use AWS callbacks all all of the above (except psycopg family) psycopg3 `psycopg[binary]>=3.0.0` module psycopg2 `psycopg2>=2.5.4` module psycopg2-binary `psycopg2-binary` module For example, the command in order to install Patroni together with psycopg3, dependencies for Etcd as a DCS, and AWS callbacks is: :: pip install patroni[psycopg3,etcd3,aws] Note that external tools to call in the replica creation or custom bootstrap scripts (i.e. WAL-E) should be installed independently of Patroni. ======================= Running and Configuring ======================= To get started, do the following from different terminals: :: > etcd --data-dir=data/etcd --enable-v2=true > ./patroni.py postgres0.yml > ./patroni.py postgres1.yml You will then see a high-availability cluster start up. Test different settings in the YAML files to see how the cluster's behavior changes. Kill some of the components to see how the system behaves. Add more ``postgres*.yml`` files to create an even larger cluster. Patroni provides an `HAProxy `__ configuration, which will give your application a single endpoint for connecting to the cluster's leader. 
To configure, run: :: > haproxy -f haproxy.cfg :: > psql --host 127.0.0.1 --port 5000 postgres ================== YAML Configuration ================== Go `here `__ for comprehensive information about settings for etcd, consul, and ZooKeeper. And for an example, see `postgres0.yml `__. ========================= Environment Configuration ========================= Go `here `__ for comprehensive information about configuring(overriding) settings via environment variables. =================== Replication Choices =================== Patroni uses Postgres' streaming replication, which is asynchronous by default. Patroni's asynchronous replication configuration allows for ``maximum_lag_on_failover`` settings. This setting ensures failover will not occur if a follower is more than a certain number of bytes behind the leader. This setting should be increased or decreased based on business requirements. It's also possible to use synchronous replication for better durability guarantees. See `replication modes documentation `__ for details. ====================================== Applications Should Not Use Superusers ====================================== When connecting from an application, always use a non-superuser. Patroni requires access to the database to function properly. By using a superuser from an application, you can potentially use the entire connection pool, including the connections reserved for superusers, with the ``superuser_reserved_connections`` setting. If Patroni cannot access the Primary because the connection pool is full, behavior will be undesirable. .. |Tests Status| image:: https://github.com/zalando/patroni/actions/workflows/tests.yaml/badge.svg :target: https://github.com/zalando/patroni/actions/workflows/tests.yaml?query=branch%3Amaster .. 
|Coverage Status| image:: https://coveralls.io/repos/zalando/patroni/badge.svg?branch=master :target: https://coveralls.io/github/zalando/patroni?branch=master patroni-3.2.2/docker-compose-citus.yml000066400000000000000000000106401455170150700177620ustar00rootroot00000000000000# docker compose file for running a Citus cluster # with 3-node etcd v3 cluster as the DCS and one haproxy node. # The Citus cluster has a coordinator (3 nodes) # and two worker clusters (2 nodes). # # Before starting it up you need to build the docker image: # $ docker build -f Dockerfile.citus -t patroni-citus . # The cluster could be started as: # $ docker-compose -f docker-compose-citus.yml up -d # You can read more about it in the: # https://github.com/zalando/patroni/blob/master/docker/README.md#citus-cluster version: "2" networks: demo: services: etcd1: &etcd image: ${PATRONI_TEST_IMAGE:-patroni-citus} networks: [ demo ] environment: ETCDCTL_API: 3 ETCD_LISTEN_PEER_URLS: http://0.0.0.0:2380 ETCD_LISTEN_CLIENT_URLS: http://0.0.0.0:2379 ETCD_INITIAL_CLUSTER: etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380 ETCD_INITIAL_CLUSTER_STATE: new ETCD_INITIAL_CLUSTER_TOKEN: tutorial ETCD_UNSUPPORTED_ARCH: arm64 container_name: demo-etcd1 hostname: etcd1 command: etcd -name etcd1 -initial-advertise-peer-urls http://etcd1:2380 etcd2: <<: *etcd container_name: demo-etcd2 hostname: etcd2 command: etcd -name etcd2 -initial-advertise-peer-urls http://etcd2:2380 etcd3: <<: *etcd container_name: demo-etcd3 hostname: etcd3 command: etcd -name etcd3 -initial-advertise-peer-urls http://etcd3:2380 haproxy: image: ${PATRONI_TEST_IMAGE:-patroni-citus} networks: [ demo ] env_file: docker/patroni.env hostname: haproxy container_name: demo-haproxy ports: - "5000:5000" # Access to the coorinator primary - "5001:5001" # Load-balancing across workers primaries command: haproxy environment: &haproxy_env ETCDCTL_API: 3 ETCDCTL_ENDPOINTS: http://etcd1:2379,http://etcd2:2379,http://etcd3:2379 
PATRONI_ETCD3_HOSTS: "'etcd1:2379','etcd2:2379','etcd3:2379'" PATRONI_SCOPE: demo PATRONI_CITUS_GROUP: 0 PATRONI_CITUS_DATABASE: citus PGSSLMODE: verify-ca PGSSLKEY: /etc/ssl/private/ssl-cert-snakeoil.key PGSSLCERT: /etc/ssl/certs/ssl-cert-snakeoil.pem PGSSLROOTCERT: /etc/ssl/certs/ssl-cert-snakeoil.pem coord1: image: ${PATRONI_TEST_IMAGE:-patroni-citus} networks: [ demo ] env_file: docker/patroni.env hostname: coord1 container_name: demo-coord1 environment: &coord_env <<: *haproxy_env PATRONI_NAME: coord1 PATRONI_CITUS_GROUP: 0 coord2: image: ${PATRONI_TEST_IMAGE:-patroni-citus} networks: [ demo ] env_file: docker/patroni.env hostname: coord2 container_name: demo-coord2 environment: <<: *coord_env PATRONI_NAME: coord2 coord3: image: ${PATRONI_TEST_IMAGE:-patroni-citus} networks: [ demo ] env_file: docker/patroni.env hostname: coord3 container_name: demo-coord3 environment: <<: *coord_env PATRONI_NAME: coord3 work1-1: image: ${PATRONI_TEST_IMAGE:-patroni-citus} networks: [ demo ] env_file: docker/patroni.env hostname: work1-1 container_name: demo-work1-1 environment: &work1_env <<: *haproxy_env PATRONI_NAME: work1-1 PATRONI_CITUS_GROUP: 1 work1-2: image: ${PATRONI_TEST_IMAGE:-patroni-citus} networks: [ demo ] env_file: docker/patroni.env hostname: work1-2 container_name: demo-work1-2 environment: <<: *work1_env PATRONI_NAME: work1-2 work2-1: image: ${PATRONI_TEST_IMAGE:-patroni-citus} networks: [ demo ] env_file: docker/patroni.env hostname: work2-1 container_name: demo-work2-1 environment: &work2_env <<: *haproxy_env PATRONI_NAME: work2-1 PATRONI_CITUS_GROUP: 2 work2-2: image: ${PATRONI_TEST_IMAGE:-patroni-citus} networks: [ demo ] env_file: docker/patroni.env hostname: work2-2 container_name: demo-work2-2 environment: <<: *work2_env PATRONI_NAME: work2-2 patroni-3.2.2/docker-compose.yml000066400000000000000000000051771455170150700166460ustar00rootroot00000000000000# docker compose file for running a 3-node PostgreSQL cluster # with 3-node etcd cluster as the DCS 
and one haproxy node # # requires a patroni image build from the Dockerfile: # $ docker build -t patroni . # The cluster could be started as: # $ docker-compose up -d # You can read more about it in the: # https://github.com/zalando/patroni/blob/master/docker/README.md version: "2" networks: demo: services: etcd1: &etcd image: ${PATRONI_TEST_IMAGE:-patroni} networks: [ demo ] environment: ETCD_LISTEN_PEER_URLS: http://0.0.0.0:2380 ETCD_LISTEN_CLIENT_URLS: http://0.0.0.0:2379 ETCD_INITIAL_CLUSTER: etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380 ETCD_INITIAL_CLUSTER_STATE: new ETCD_INITIAL_CLUSTER_TOKEN: tutorial ETCD_UNSUPPORTED_ARCH: arm64 container_name: demo-etcd1 hostname: etcd1 command: etcd -name etcd1 -initial-advertise-peer-urls http://etcd1:2380 etcd2: <<: *etcd container_name: demo-etcd2 hostname: etcd2 command: etcd -name etcd2 -initial-advertise-peer-urls http://etcd2:2380 etcd3: <<: *etcd container_name: demo-etcd3 hostname: etcd3 command: etcd -name etcd3 -initial-advertise-peer-urls http://etcd3:2380 haproxy: image: ${PATRONI_TEST_IMAGE:-patroni} networks: [ demo ] env_file: docker/patroni.env hostname: haproxy container_name: demo-haproxy ports: - "5000:5000" - "5001:5001" command: haproxy environment: &haproxy_env ETCDCTL_ENDPOINTS: http://etcd1:2379,http://etcd2:2379,http://etcd3:2379 PATRONI_ETCD3_HOSTS: "'etcd1:2379','etcd2:2379','etcd3:2379'" PATRONI_SCOPE: demo patroni1: image: ${PATRONI_TEST_IMAGE:-patroni} networks: [ demo ] env_file: docker/patroni.env hostname: patroni1 container_name: demo-patroni1 environment: <<: *haproxy_env PATRONI_NAME: patroni1 patroni2: image: ${PATRONI_TEST_IMAGE:-patroni} networks: [ demo ] env_file: docker/patroni.env hostname: patroni2 container_name: demo-patroni2 environment: <<: *haproxy_env PATRONI_NAME: patroni2 patroni3: image: ${PATRONI_TEST_IMAGE:-patroni} networks: [ demo ] env_file: docker/patroni.env hostname: patroni3 container_name: demo-patroni3 environment: <<: *haproxy_env 
PATRONI_NAME: patroni3 patroni-3.2.2/docker/000077500000000000000000000000001455170150700144465ustar00rootroot00000000000000patroni-3.2.2/docker/README.md000066400000000000000000000443501455170150700157330ustar00rootroot00000000000000# Dockerfile and Dockerfile.citus You can run Patroni in a docker container using these Dockerfiles They are meant in aiding development of Patroni and quick testing of features and not a production-worthy! docker build -t patroni . docker build -f Dockerfile.citus -t patroni-citus . # Examples ## Standalone Patroni docker run -d patroni ## Three-node Patroni cluster In addition to three Patroni containers the stack starts three containers with etcd (forming a three-node cluster), and one container with haproxy. The haproxy listens on ports 5000 (connects to the primary) and 5001 (does load-balancing between healthy standbys). Example session: $ docker-compose up -d Creating demo-haproxy ... Creating demo-patroni2 ... Creating demo-patroni1 ... Creating demo-patroni3 ... Creating demo-etcd2 ... Creating demo-etcd1 ... Creating demo-etcd3 ... Creating demo-haproxy Creating demo-patroni2 Creating demo-patroni1 Creating demo-patroni3 Creating demo-etcd1 Creating demo-etcd2 Creating demo-etcd2 ... 
done $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 5b7a90b4cfbf patroni "/bin/sh /entrypoint…" 29 seconds ago Up 27 seconds demo-etcd2 e30eea5222f2 patroni "/bin/sh /entrypoint…" 29 seconds ago Up 27 seconds demo-etcd1 83bcf3cb208f patroni "/bin/sh /entrypoint…" 29 seconds ago Up 27 seconds demo-etcd3 922532c56e7d patroni "/bin/sh /entrypoint…" 29 seconds ago Up 28 seconds demo-patroni3 14f875e445f3 patroni "/bin/sh /entrypoint…" 29 seconds ago Up 28 seconds demo-patroni2 110d1073b383 patroni "/bin/sh /entrypoint…" 29 seconds ago Up 28 seconds demo-patroni1 5af5e6e36028 patroni "/bin/sh /entrypoint…" 29 seconds ago Up 28 seconds 0.0.0.0:5000-5001->5000-5001/tcp demo-haproxy $ docker logs demo-patroni1 2019-02-20 08:19:32,714 INFO: Failed to import patroni.dcs.consul 2019-02-20 08:19:32,737 INFO: Selected new etcd server http://etcd3:2379 2019-02-20 08:19:35,140 INFO: Lock owner: None; I am patroni1 2019-02-20 08:19:35,174 INFO: trying to bootstrap a new cluster ... 2019-02-20 08:19:39,310 INFO: postmaster pid=37 2019-02-20 08:19:39.314 UTC [37] LOG: listening on IPv4 address "0.0.0.0", port 5432 2019-02-20 08:19:39.321 UTC [37] LOG: listening on Unix socket "/var/run/postgresql/.s.PGSQL.5432" 2019-02-20 08:19:39.353 UTC [39] LOG: database system was shut down at 2019-02-20 08:19:36 UTC 2019-02-20 08:19:39.354 UTC [40] FATAL: the database system is starting up localhost:5432 - rejecting connections 2019-02-20 08:19:39.369 UTC [37] LOG: database system is ready to accept connections localhost:5432 - accepting connections 2019-02-20 08:19:39,383 INFO: establishing a new patroni connection to the postgres cluster 2019-02-20 08:19:39,408 INFO: running post_bootstrap 2019-02-20 08:19:39,432 WARNING: Could not activate Linux watchdog device: "Can't open watchdog device: [Errno 2] No such file or directory: '/dev/watchdog'" 2019-02-20 08:19:39,515 INFO: initialized a new cluster 2019-02-20 08:19:49,424 INFO: Lock owner: patroni1; I am patroni1 
2019-02-20 08:19:49,447 INFO: Lock owner: patroni1; I am patroni1 2019-02-20 08:19:49,480 INFO: no action. i am the leader with the lock 2019-02-20 08:19:59,422 INFO: Lock owner: patroni1; I am patroni1 $ docker exec -ti demo-patroni1 bash postgres@patroni1:~$ patronictl list +---------+----------+------------+--------+---------+----+-----------+ | Cluster | Member | Host | Role | State | TL | Lag in MB | +---------+----------+------------+--------+---------+----+-----------+ | demo | patroni1 | 172.22.0.3 | Leader | running | 1 | 0 | | demo | patroni2 | 172.22.0.7 | | running | 1 | 0 | | demo | patroni3 | 172.22.0.4 | | running | 1 | 0 | +---------+----------+------------+--------+---------+----+-----------+ postgres@patroni1:~$ etcdctl get --keys-only --prefix /service/demo /service/demo/config /service/demo/initialize /service/demo/leader /service/demo/members/ /service/demo/members/patroni1 /service/demo/members/patroni2 /service/demo/members/patroni3 /service/demo/optime/ /service/demo/optime/leader postgres@patroni1:~$ etcdctl member list 1bab629f01fa9065: name=etcd3 peerURLs=http://etcd3:2380 clientURLs=http://etcd3:2379 isLeader=false 8ecb6af518d241cc: name=etcd2 peerURLs=http://etcd2:2380 clientURLs=http://etcd2:2379 isLeader=true b2e169fcb8a34028: name=etcd1 peerURLs=http://etcd1:2380 clientURLs=http://etcd1:2379 isLeader=false postgres@patroni1:~$ exit $ docker exec -ti demo-haproxy bash postgres@haproxy:~$ psql -h localhost -p 5000 -U postgres -W Password: postgres psql (11.2 (Ubuntu 11.2-1.pgdg18.04+1), server 10.7 (Debian 10.7-1.pgdg90+1)) Type "help" for help. localhost/postgres=# select pg_is_in_recovery(); pg_is_in_recovery ─────────────────── f (1 row) localhost/postgres=# \q $postgres@haproxy:~ psql -h localhost -p 5001 -U postgres -W Password: postgres psql (11.2 (Ubuntu 11.2-1.pgdg18.04+1), server 10.7 (Debian 10.7-1.pgdg90+1)) Type "help" for help. 
localhost/postgres=# select pg_is_in_recovery(); pg_is_in_recovery ─────────────────── t (1 row) ## Citus cluster The stack starts three containers with etcd (forming a three-node etcd cluster), seven containers with Patroni+PostgreSQL+Citus (three coordinator nodes, and two worker clusters with two nodes each), and one container with haproxy. The haproxy listens on ports 5000 (connects to the coordinator primary) and 5001 (does load-balancing between worker primary nodes). Example session: $ docker-compose -f docker-compose-citus.yml up -d Creating demo-work2-1 ... done Creating demo-work1-1 ... done Creating demo-etcd2 ... done Creating demo-etcd1 ... done Creating demo-coord3 ... done Creating demo-etcd3 ... done Creating demo-coord1 ... done Creating demo-haproxy ... done Creating demo-work2-2 ... done Creating demo-coord2 ... done Creating demo-work1-2 ... done $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 852d8885a612 patroni-citus "/bin/sh /entrypoint…" 6 seconds ago Up 3 seconds demo-coord3 cdd692f947ab patroni-citus "/bin/sh /entrypoint…" 6 seconds ago Up 3 seconds demo-work1-2 9f4e340b36da patroni-citus "/bin/sh /entrypoint…" 6 seconds ago Up 3 seconds demo-etcd3 d69c129a960a patroni-citus "/bin/sh /entrypoint…" 6 seconds ago Up 4 seconds demo-etcd1 c5849689b8cd patroni-citus "/bin/sh /entrypoint…" 6 seconds ago Up 4 seconds demo-coord1 c9d72bd6217d patroni-citus "/bin/sh /entrypoint…" 6 seconds ago Up 3 seconds demo-work2-1 24b1b43efa05 patroni-citus "/bin/sh /entrypoint…" 6 seconds ago Up 4 seconds demo-coord2 cb0cc2b4ca0a patroni-citus "/bin/sh /entrypoint…" 6 seconds ago Up 3 seconds demo-work2-2 9796c6b8aad5 patroni-citus "/bin/sh /entrypoint…" 6 seconds ago Up 5 seconds demo-work1-1 8baccd74dcae patroni-citus "/bin/sh /entrypoint…" 6 seconds ago Up 4 seconds demo-etcd2 353ec62a0187 patroni-citus "/bin/sh /entrypoint…" 6 seconds ago Up 4 seconds 0.0.0.0:5000-5001->5000-5001/tcp demo-haproxy $ docker logs demo-coord1 2023-01-05 
15:09:31,295 INFO: Selected new etcd server http://172.27.0.4:2379 2023-01-05 15:09:31,388 INFO: Lock owner: None; I am coord1 2023-01-05 15:09:31,501 INFO: trying to bootstrap a new cluster ... 2023-01-05 15:09:45,096 INFO: postmaster pid=39 localhost:5432 - no response 2023-01-05 15:09:45.137 UTC [39] LOG: starting PostgreSQL 15.1 (Debian 15.1-1.pgdg110+1) on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit 2023-01-05 15:09:45.137 UTC [39] LOG: listening on IPv4 address "0.0.0.0", port 5432 2023-01-05 15:09:45.152 UTC [39] LOG: listening on Unix socket "/var/run/postgresql/.s.PGSQL.5432" 2023-01-05 15:09:45.177 UTC [43] LOG: database system was shut down at 2023-01-05 15:09:32 UTC 2023-01-05 15:09:45.193 UTC [39] LOG: database system is ready to accept connections localhost:5432 - accepting connections localhost:5432 - accepting connections 2023-01-05 15:09:46,139 INFO: establishing a new patroni connection to the postgres cluster 2023-01-05 15:09:46,208 INFO: running post_bootstrap 2023-01-05 15:09:47.209 UTC [55] LOG: starting maintenance daemon on database 16386 user 10 2023-01-05 15:09:47.209 UTC [55] CONTEXT: Citus maintenance daemon for database 16386 user 10 2023-01-05 15:09:47,215 WARNING: Could not activate Linux watchdog device: "Can't open watchdog device: [Errno 2] No such file or directory: '/dev/watchdog'" 2023-01-05 15:09:47.446 UTC [41] LOG: checkpoint starting: immediate force wait 2023-01-05 15:09:47,466 INFO: initialized a new cluster 2023-01-05 15:09:47,594 DEBUG: query(SELECT nodeid, groupid, nodename, nodeport, noderole FROM pg_catalog.pg_dist_node WHERE noderole = 'primary', ()) 2023-01-05 15:09:47,594 INFO: establishing a new patroni connection to the postgres cluster 2023-01-05 15:09:47,467 INFO: Lock owner: coord1; I am coord1 2023-01-05 15:09:47,613 DEBUG: query(SELECT pg_catalog.citus_set_coordinator_host(%s, %s, 'primary', 'default'), ('172.27.0.6', 5432)) 2023-01-05 15:09:47,924 INFO: no action. 
I am (coord1), the leader with the lock 2023-01-05 15:09:51.282 UTC [41] LOG: checkpoint complete: wrote 1086 buffers (53.0%); 0 WAL file(s) added, 0 removed, 0 recycled; write=0.029 s, sync=3.746 s, total=3.837 s; sync files=280, longest=0.028 s, average=0.014 s; distance=8965 kB, estimate=8965 kB 2023-01-05 15:09:51.283 UTC [41] LOG: checkpoint starting: immediate force wait 2023-01-05 15:09:51.495 UTC [41] LOG: checkpoint complete: wrote 18 buffers (0.9%); 0 WAL file(s) added, 0 removed, 0 recycled; write=0.044 s, sync=0.091 s, total=0.212 s; sync files=15, longest=0.015 s, average=0.007 s; distance=67 kB, estimate=8076 kB 2023-01-05 15:09:57,467 INFO: Lock owner: coord1; I am coord1 2023-01-05 15:09:57,569 INFO: Assigning synchronous standby status to ['coord3'] server signaled 2023-01-05 15:09:57.574 UTC [39] LOG: received SIGHUP, reloading configuration files 2023-01-05 15:09:57.580 UTC [39] LOG: parameter "synchronous_standby_names" changed to "coord3" 2023-01-05 15:09:59,637 INFO: Synchronous standby status assigned to ['coord3'] 2023-01-05 15:09:59,638 DEBUG: query(SELECT pg_catalog.citus_add_node(%s, %s, %s, 'primary', 'default'), ('172.27.0.2', 5432, 1)) 2023-01-05 15:09:59.690 UTC [67] LOG: standby "coord3" is now a synchronous standby with priority 1 2023-01-05 15:09:59.690 UTC [67] STATEMENT: START_REPLICATION SLOT "coord3" 0/3000000 TIMELINE 1 2023-01-05 15:09:59,694 INFO: no action. I am (coord1), the leader with the lock 2023-01-05 15:09:59,704 DEBUG: query(SELECT pg_catalog.citus_add_node(%s, %s, %s, 'primary', 'default'), ('172.27.0.8', 5432, 2)) 2023-01-05 15:10:07,625 INFO: no action. I am (coord1), the leader with the lock 2023-01-05 15:10:17,579 INFO: no action. 
I am (coord1), the leader with the lock $ docker exec -ti demo-haproxy bash postgres@haproxy:~$ etcdctl member list 1bab629f01fa9065, started, etcd3, http://etcd3:2380, http://172.27.0.10:2379 8ecb6af518d241cc, started, etcd2, http://etcd2:2380, http://172.27.0.4:2379 b2e169fcb8a34028, started, etcd1, http://etcd1:2380, http://172.27.0.7:2379 postgres@haproxy:~$ etcdctl get --keys-only --prefix /service/demo /service/demo/0/config /service/demo/0/initialize /service/demo/0/leader /service/demo/0/members/coord1 /service/demo/0/members/coord2 /service/demo/0/members/coord3 /service/demo/0/status /service/demo/0/sync /service/demo/1/config /service/demo/1/initialize /service/demo/1/leader /service/demo/1/members/work1-1 /service/demo/1/members/work1-2 /service/demo/1/status /service/demo/1/sync /service/demo/2/config /service/demo/2/initialize /service/demo/2/leader /service/demo/2/members/work2-1 /service/demo/2/members/work2-2 /service/demo/2/status /service/demo/2/sync postgres@haproxy:~$ psql -h localhost -p 5000 -U postgres -d citus Password for user postgres: postgres psql (15.1 (Debian 15.1-1.pgdg110+1)) SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, compression: off) Type "help" for help. 
citus=# select pg_is_in_recovery(); pg_is_in_recovery ------------------- f (1 row) citus=# table pg_dist_node; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards --------+---------+------------+----------+----------+-------------+----------+----------+-------------+----------------+------------------ 1 | 0 | 172.27.0.6 | 5432 | default | t | t | primary | default | t | f 2 | 1 | 172.27.0.2 | 5432 | default | t | t | primary | default | t | t 3 | 2 | 172.27.0.8 | 5432 | default | t | t | primary | default | t | t (3 rows) citus=# \q postgres@haproxy:~$ patronictl list + Citus cluster: demo ----------+--------------+---------+----+-----------+ | Group | Member | Host | Role | State | TL | Lag in MB | +-------+---------+-------------+--------------+---------+----+-----------+ | 0 | coord1 | 172.27.0.6 | Leader | running | 1 | | | 0 | coord2 | 172.27.0.5 | Replica | running | 1 | 0 | | 0 | coord3 | 172.27.0.9 | Sync Standby | running | 1 | 0 | | 1 | work1-1 | 172.27.0.2 | Leader | running | 1 | | | 1 | work1-2 | 172.27.0.12 | Sync Standby | running | 1 | 0 | | 2 | work2-1 | 172.27.0.11 | Sync Standby | running | 1 | 0 | | 2 | work2-2 | 172.27.0.8 | Leader | running | 1 | | +-------+---------+-------------+--------------+---------+----+-----------+ postgres@haproxy:~$ patronictl switchover --group 2 --force Current cluster topology + Citus cluster: demo (group: 2, 7185185529556963355) +-----------+ | Member | Host | Role | State | TL | Lag in MB | +---------+-------------+--------------+---------+----+-----------+ | work2-1 | 172.27.0.11 | Sync Standby | running | 1 | 0 | | work2-2 | 172.27.0.8 | Leader | running | 1 | | +---------+-------------+--------------+---------+----+-----------+ 2023-01-05 15:29:29.54204 Successfully switched over to "work2-1" + Citus cluster: demo (group: 2, 7185185529556963355) -------+ | Member | Host | Role | State | TL | Lag in MB | 
+---------+-------------+---------+---------+----+-----------+ | work2-1 | 172.27.0.11 | Leader | running | 1 | | | work2-2 | 172.27.0.8 | Replica | stopped | | unknown | +---------+-------------+---------+---------+----+-----------+ postgres@haproxy:~$ patronictl list + Citus cluster: demo ----------+--------------+---------+----+-----------+ | Group | Member | Host | Role | State | TL | Lag in MB | +-------+---------+-------------+--------------+---------+----+-----------+ | 0 | coord1 | 172.27.0.6 | Leader | running | 1 | | | 0 | coord2 | 172.27.0.5 | Replica | running | 1 | 0 | | 0 | coord3 | 172.27.0.9 | Sync Standby | running | 1 | 0 | | 1 | work1-1 | 172.27.0.2 | Leader | running | 1 | | | 1 | work1-2 | 172.27.0.12 | Sync Standby | running | 1 | 0 | | 2 | work2-1 | 172.27.0.11 | Leader | running | 2 | | | 2 | work2-2 | 172.27.0.8 | Sync Standby | running | 2 | 0 | +-------+---------+-------------+--------------+---------+----+-----------+ postgres@haproxy:~$ psql -h localhost -p 5000 -U postgres -d citus Password for user postgres: postgres psql (15.1 (Debian 15.1-1.pgdg110+1)) SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, compression: off) Type "help" for help. citus=# table pg_dist_node; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards --------+---------+-------------+----------+----------+-------------+----------+----------+-------------+----------------+------------------ 1 | 0 | 172.27.0.6 | 5432 | default | t | t | primary | default | t | f 3 | 2 | 172.27.0.11 | 5432 | default | t | t | primary | default | t | t 2 | 1 | 172.27.0.2 | 5432 | default | t | t | primary | default | t | t (3 rows) patroni-3.2.2/docker/entrypoint.sh000077500000000000000000000061751455170150700172310ustar00rootroot00000000000000#!/bin/sh if [ -f /a.tar.xz ]; then echo "decompressing image..." 
sudo tar xpJf /a.tar.xz -C / > /dev/null 2>&1 sudo rm /a.tar.xz sudo ln -snf dash /bin/sh fi readonly PATRONI_SCOPE="${PATRONI_SCOPE:-batman}" PATRONI_NAMESPACE="${PATRONI_NAMESPACE:-/service}" readonly PATRONI_NAMESPACE="${PATRONI_NAMESPACE%/}" DOCKER_IP=$(hostname --ip-address) readonly DOCKER_IP case "$1" in haproxy) haproxy -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid -D set -- confd "-prefix=$PATRONI_NAMESPACE/$PATRONI_SCOPE" -interval=10 -backend if [ -n "$PATRONI_ZOOKEEPER_HOSTS" ]; then while ! /usr/share/zookeeper/bin/zkCli.sh -server "$PATRONI_ZOOKEEPER_HOSTS" ls /; do sleep 1 done set -- "$@" zookeeper -node "$PATRONI_ZOOKEEPER_HOSTS" else while ! etcdctl member list 2> /dev/null; do sleep 1 done set -- "$@" etcdv3 while IFS='' read -r line; do set -- "$@" -node "$line" done <<-EOT $(echo "$ETCDCTL_ENDPOINTS" | sed 's/,/\n/g') EOT fi exec dumb-init "$@" ;; etcd) exec "$@" -advertise-client-urls "http://$DOCKER_IP:2379" ;; zookeeper) exec /usr/share/zookeeper/bin/zkServer.sh start-foreground ;; esac ## We start an etcd if [ -z "$PATRONI_ETCD3_HOSTS" ] && [ -z "$PATRONI_ZOOKEEPER_HOSTS" ]; then export PATRONI_ETCD_URL="http://127.0.0.1:2379" etcd --data-dir /tmp/etcd.data -advertise-client-urls=$PATRONI_ETCD_URL -listen-client-urls=http://0.0.0.0:2379 > /var/log/etcd.log 2> /var/log/etcd.err & fi export PATRONI_SCOPE export PATRONI_NAMESPACE export PATRONI_NAME="${PATRONI_NAME:-$(hostname)}" export PATRONI_RESTAPI_CONNECT_ADDRESS="$DOCKER_IP:8008" export PATRONI_RESTAPI_LISTEN="0.0.0.0:8008" export PATRONI_admin_PASSWORD="${PATRONI_admin_PASSWORD:-admin}" export PATRONI_admin_OPTIONS="${PATRONI_admin_OPTIONS:-createdb, createrole}" export PATRONI_POSTGRESQL_CONNECT_ADDRESS="$DOCKER_IP:5432" export PATRONI_POSTGRESQL_LISTEN="0.0.0.0:5432" export PATRONI_POSTGRESQL_DATA_DIR="${PATRONI_POSTGRESQL_DATA_DIR:-$PGDATA}" export PATRONI_REPLICATION_USERNAME="${PATRONI_REPLICATION_USERNAME:-replicator}" export 
PATRONI_REPLICATION_PASSWORD="${PATRONI_REPLICATION_PASSWORD:-replicate}" export PATRONI_SUPERUSER_USERNAME="${PATRONI_SUPERUSER_USERNAME:-postgres}" export PATRONI_SUPERUSER_PASSWORD="${PATRONI_SUPERUSER_PASSWORD:-postgres}" export PATRONI_REPLICATION_SSLMODE="${PATRONI_REPLICATION_SSLMODE:-$PGSSLMODE}" export PATRONI_REPLICATION_SSLKEY="${PATRONI_REPLICATION_SSLKEY:-$PGSSLKEY}" export PATRONI_REPLICATION_SSLCERT="${PATRONI_REPLICATION_SSLCERT:-$PGSSLCERT}" export PATRONI_REPLICATION_SSLROOTCERT="${PATRONI_REPLICATION_SSLROOTCERT:-$PGSSLROOTCERT}" export PATRONI_SUPERUSER_SSLMODE="${PATRONI_SUPERUSER_SSLMODE:-$PGSSLMODE}" export PATRONI_SUPERUSER_SSLKEY="${PATRONI_SUPERUSER_SSLKEY:-$PGSSLKEY}" export PATRONI_SUPERUSER_SSLCERT="${PATRONI_SUPERUSER_SSLCERT:-$PGSSLCERT}" export PATRONI_SUPERUSER_SSLROOTCERT="${PATRONI_SUPERUSER_SSLROOTCERT:-$PGSSLROOTCERT}" exec python3 /patroni.py postgres0.yml patroni-3.2.2/docker/patroni.env000066400000000000000000000004341455170150700166350ustar00rootroot00000000000000PATRONI_RESTAPI_USERNAME=admin PATRONI_RESTAPI_PASSWORD=admin PATRONI_SUPERUSER_USERNAME=postgres PATRONI_SUPERUSER_PASSWORD=postgres PATRONI_REPLICATION_USERNAME=replicator PATRONI_REPLICATION_PASSWORD=replicate PATRONI_admin_PASSWORD=admin PATRONI_admin_OPTIONS=createdb,createrole patroni-3.2.2/docs/000077500000000000000000000000001455170150700141275ustar00rootroot00000000000000patroni-3.2.2/docs/CONTRIBUTING.rst000066400000000000000000000003271455170150700165720ustar00rootroot00000000000000.. _contributing: Contributing ============ Resources and information for developers can be found in the pages below. .. toctree:: :maxdepth: 2 contributing_guidelines Patroni API docs patroni-3.2.2/docs/ENVIRONMENT.rst000066400000000000000000000730611455170150700164740ustar00rootroot00000000000000.. 
_environment: Environment Configuration Settings ================================== It is possible to override some of the configuration parameters defined in the Patroni configuration file using the system environment variables. This document lists all environment variables handled by Patroni. The values set via those variables always take precedence over the ones set in the Patroni configuration file. Global/Universal ---------------- - **PATRONI\_CONFIGURATION**: it is possible to set the entire configuration for the Patroni via ``PATRONI_CONFIGURATION`` environment variable. In this case any other environment variables will not be considered! - **PATRONI\_NAME**: name of the node where the current instance of Patroni is running. Must be unique for the cluster. - **PATRONI\_NAMESPACE**: path within the configuration store where Patroni will keep information about the cluster. Default value: "/service" - **PATRONI\_SCOPE**: cluster name Log --- - **PATRONI\_LOG\_LEVEL**: sets the general logging level. Default value is **INFO** (see `the docs for Python logging `_) - **PATRONI\_LOG\_TRACEBACK\_LEVEL**: sets the level where tracebacks will be visible. Default value is **ERROR**. Set it to **DEBUG** if you want to see tracebacks only if you enable **PATRONI\_LOG\_LEVEL=DEBUG**. - **PATRONI\_LOG\_FORMAT**: sets the log formatting string. Default value is **%(asctime)s %(levelname)s: %(message)s** (see `the LogRecord attributes `_) - **PATRONI\_LOG\_DATEFORMAT**: sets the datetime formatting string. (see the `formatTime() documentation `_) - **PATRONI\_LOG\_MAX\_QUEUE\_SIZE**: Patroni is using two-step logging. Log records are written into the in-memory queue and there is a separate thread which pulls them from the queue and writes to stderr or file. The maximum size of the internal queue is limited by default by **1000** records, which is enough to keep logs for the past 1h20m. - **PATRONI\_LOG\_DIR**: Directory to write application logs to. 
The directory must exist and be writable by the user executing Patroni. If you set this env variable, the application will retain 4 25MB logs by default. You can tune those retention values with `PATRONI_LOG_FILE_NUM` and `PATRONI_LOG_FILE_SIZE` (see below). - **PATRONI\_LOG\_FILE\_NUM**: The number of application logs to retain. - **PATRONI\_LOG\_FILE\_SIZE**: Size of patroni.log file (in bytes) that triggers a log rolling. - **PATRONI\_LOG\_LOGGERS**: Redefine logging level per python module. Example ``PATRONI_LOG_LOGGERS="{patroni.postmaster: WARNING, urllib3: DEBUG}"`` Citus ----- Enables integration Patroni with `Citus `__. If configured, Patroni will take care of registering Citus worker nodes on the coordinator. You can find more information about Citus support :ref:`here `. - **PATRONI\_CITUS\_GROUP**: the Citus group id, integer. Use ``0`` for coordinator and ``1``, ``2``, etc... for workers - **PATRONI\_CITUS\_DATABASE**: the database where ``citus`` extension should be created. Must be the same on the coordinator and all workers. Currently only one database is supported. Consul ------ - **PATRONI\_CONSUL\_HOST**: the host:port for the Consul local agent. - **PATRONI\_CONSUL\_URL**: url for the Consul local agent, in format: http(s)://host:port - **PATRONI\_CONSUL\_PORT**: (optional) Consul port - **PATRONI\_CONSUL\_SCHEME**: (optional) **http** or **https**, defaults to **http** - **PATRONI\_CONSUL\_TOKEN**: (optional) ACL token - **PATRONI\_CONSUL\_VERIFY**: (optional) whether to verify the SSL certificate for HTTPS requests - **PATRONI\_CONSUL\_CACERT**: (optional) The ca certificate. If present it will enable validation. - **PATRONI\_CONSUL\_CERT**: (optional) File with the client certificate - **PATRONI\_CONSUL\_KEY**: (optional) File with the client key. Can be empty if the key is part of certificate. - **PATRONI\_CONSUL\_DC**: (optional) Datacenter to communicate with. By default the datacenter of the host is used. 
- **PATRONI\_CONSUL\_CONSISTENCY**: (optional) Select consul consistency mode. Possible values are ``default``, ``consistent``, or ``stale`` (more details in `consul API reference `__) - **PATRONI\_CONSUL\_CHECKS**: (optional) list of Consul health checks used for the session. By default an empty list is used. - **PATRONI\_CONSUL\_REGISTER\_SERVICE**: (optional) whether or not to register a service with the name defined by the scope parameter and the tag master, primary, replica, or standby-leader depending on the node's role. Defaults to **false** - **PATRONI\_CONSUL\_SERVICE\_TAGS**: (optional) additional static tags to add to the Consul service apart from the role (``master``/``primary``/``replica``/``standby-leader``). By default an empty list is used. - **PATRONI\_CONSUL\_SERVICE\_CHECK\_INTERVAL**: (optional) how often to perform health check against registered url - **PATRONI\_CONSUL\_SERVICE\_CHECK\_TLS\_SERVER\_NAME**: (optional) overide SNI host when connecting via TLS, see also `consul agent check API reference `__. Etcd ---- - **PATRONI\_ETCD\_PROXY**: proxy url for the etcd. If you are connecting to the etcd using proxy, use this parameter instead of **PATRONI\_ETCD\_URL** - **PATRONI\_ETCD\_URL**: url for the etcd, in format: http(s)://(username:password@)host:port - **PATRONI\_ETCD\_HOSTS**: list of etcd endpoints in format 'host1:port1','host2:port2',etc... - **PATRONI\_ETCD\_USE\_PROXIES**: If this parameter is set to true, Patroni will consider **hosts** as a list of proxies and will not perform a topology discovery of etcd cluster but stick to a fixed list of **hosts**. - **PATRONI\_ETCD\_PROTOCOL**: http or https, if not specified http is used. If the **url** or **proxy** is specified - will take protocol from them. - **PATRONI\_ETCD\_HOST**: the host:port for the etcd endpoint. - **PATRONI\_ETCD\_SRV**: Domain to search the SRV record(s) for cluster autodiscovery. 
Patroni will try to query these SRV service names for specified domain (in that order until first success): ``_etcd-client-ssl``, ``_etcd-client``, ``_etcd-ssl``, ``_etcd``, ``_etcd-server-ssl``, ``_etcd-server``. If SRV records for ``_etcd-server-ssl`` or ``_etcd-server`` are retrieved then ETCD peer protocol is used do query ETCD for available members. Otherwise hosts from SRV records will be used. - **PATRONI\_ETCD\_SRV\_SUFFIX**: Configures a suffix to the SRV name that is queried during discovery. Use this flag to differentiate between multiple etcd clusters under the same domain. Works only with conjunction with **PATRONI\_ETCD\_SRV**. For example, if ``PATRONI_ETCD_SRV_SUFFIX=foo`` and ``PATRONI_ETCD_SRV=example.org`` are set, the following DNS SRV query is made:``_etcd-client-ssl-foo._tcp.example.com`` (and so on for every possible ETCD SRV service name). - **PATRONI\_ETCD\_USERNAME**: username for etcd authentication. - **PATRONI\_ETCD\_PASSWORD**: password for etcd authentication. - **PATRONI\_ETCD\_CACERT**: The ca certificate. If present it will enable validation. - **PATRONI\_ETCD\_CERT**: File with the client certificate. - **PATRONI\_ETCD\_KEY**: File with the client key. Can be empty if the key is part of certificate. Etcdv3 ------ Environment names for Etcdv3 are similar as for Etcd, you just need to use ``ETCD3`` instead of ``ETCD`` in the variable name. Example: ``PATRONI_ETCD3_HOST``, ``PATRONI_ETCD3_CACERT``, and so on. .. warning:: Keys created with protocol version 2 are not visible with protocol version 3 and the other way around, therefore it is not possible to switch from Etcd to Etcdv3 just by updating Patroni configuration. ZooKeeper --------- - **PATRONI\_ZOOKEEPER\_HOSTS**: Comma separated list of ZooKeeper cluster members: "'host1:port1','host2:port2','etc...'". It is important to quote every single entity! - **PATRONI\_ZOOKEEPER\_USE\_SSL**: (optional) Whether SSL is used or not. Defaults to ``false``. 
If set to ``false``, all SSL specific parameters are ignored. - **PATRONI\_ZOOKEEPER\_CACERT**: (optional) The CA certificate. If present it will enable validation. - **PATRONI\_ZOOKEEPER\_CERT**: (optional) File with the client certificate. - **PATRONI\_ZOOKEEPER\_KEY**: (optional) File with the client key. - **PATRONI\_ZOOKEEPER\_KEY\_PASSWORD**: (optional) The client key password. - **PATRONI\_ZOOKEEPER\_VERIFY**: (optional) Whether to verify certificate or not. Defaults to ``true``. - **PATRONI\_ZOOKEEPER\_SET\_ACLS**: (optional) If set, configure Kazoo to apply a default ACL to each ZNode that it creates. ACLs will assume 'x509' schema and should be specified as a dictionary with the principal as the key and one or more permissions as a list in the value. Permissions may be one of ``CREATE``, ``READ``, ``WRITE``, ``DELETE`` or ``ADMIN``. For example, ``set_acls: {CN=principal1: [CREATE, READ], CN=principal2: [ALL]}``. .. note:: It is required to install ``kazoo>=2.6.0`` to support SSL. Exhibitor --------- - **PATRONI\_EXHIBITOR\_HOSTS**: initial list of Exhibitor (ZooKeeper) nodes in format: 'host1,host2,etc...'. This list updates automatically whenever the Exhibitor (ZooKeeper) cluster topology changes. - **PATRONI\_EXHIBITOR\_PORT**: Exhibitor port. .. _kubernetes_environment: Kubernetes ---------- - **PATRONI\_KUBERNETES\_BYPASS\_API\_SERVICE**: (optional) When communicating with the Kubernetes API, Patroni is usually relying on the `kubernetes` service, the address of which is exposed in the pods via the `KUBERNETES_SERVICE_HOST` environment variable. If `PATRONI_KUBERNETES_BYPASS_API_SERVICE` is set to ``true``, Patroni will resolve the list of API nodes behind the service and connect directly to them. - **PATRONI\_KUBERNETES\_NAMESPACE**: (optional) Kubernetes namespace where the Patroni pod is running. Default value is `default`. - **PATRONI\_KUBERNETES\_LABELS**: Labels in format ``{label1: value1, label2: value2}``. 
These labels will be used to find existing objects (Pods and either Endpoints or ConfigMaps) associated with the current cluster. Also Patroni will set them on every object (Endpoint or ConfigMap) it creates. - **PATRONI\_KUBERNETES\_SCOPE\_LABEL**: (optional) name of the label containing cluster name. Default value is `cluster-name`. - **PATRONI\_KUBERNETES\_ROLE\_LABEL**: (optional) name of the label containing role (master or replica or other custom value). Patroni will set this label on the pod it runs in. Default value is ``role``. - **PATRONI\_KUBERNETES\_LEADER\_LABEL\_VALUE**: (optional) value of the pod label when Postgres role is `master`. Default value is `master`. - **PATRONI\_KUBERNETES\_FOLLOWER\_LABEL\_VALUE**: (optional) value of the pod label when Postgres role is `replica`. Default value is `replica`. - **PATRONI\_KUBERNETES\_STANDBY\_LEADER\_LABEL\_VALUE**: (optional) value of the pod label when Postgres role is ``standby_leader``. Default value is ``master``. - **PATRONI\_KUBERNETES\_TMP\_ROLE\_LABEL**: (optional) name of the temporary label containing role (master or replica). Value of this label will always use the default of corresponding role. Set only when necessary. - **PATRONI\_KUBERNETES\_USE\_ENDPOINTS**: (optional) if set to true, Patroni will use Endpoints instead of ConfigMaps to run leader elections and keep cluster state. - **PATRONI\_KUBERNETES\_POD\_IP**: (optional) IP address of the pod Patroni is running in. This value is required when `PATRONI_KUBERNETES_USE_ENDPOINTS` is enabled and is used to populate the leader endpoint subsets when the pod's PostgreSQL is promoted. - **PATRONI\_KUBERNETES\_PORTS**: (optional) if the Service object has the name for the port, the same name must appear in the Endpoint object, otherwise service won't work. 
For example, if your service is defined as ``{Kind: Service, spec: {ports: [{name: postgresql, port: 5432, targetPort: 5432}]}}``, then you have to set ``PATRONI_KUBERNETES_PORTS='[{"name": "postgresql", "port": 5432}]'`` and Patroni will use it for updating subsets of the leader Endpoint. This parameter is used only if `PATRONI_KUBERNETES_USE_ENDPOINTS` is set. - **PATRONI\_KUBERNETES\_CACERT**: (optional) Specifies the file with the CA_BUNDLE file with certificates of trusted CAs to use while verifying Kubernetes API SSL certs. If not provided, patroni will use the value provided by the ServiceAccount secret. - **PATRONI\_RETRIABLE\_HTTP\_CODES**: (optional) list of HTTP status codes from K8s API to retry on. By default Patroni is retrying on ``500``, ``503``, and ``504``, or if K8s API response has ``retry-after`` HTTP header. Raft (deprecated) ----------------- - **PATRONI\_RAFT\_SELF\_ADDR**: ``ip:port`` to listen on for Raft connections. The ``self_addr`` must be accessible from other nodes of the cluster. If not set, the node will not participate in consensus. - **PATRONI\_RAFT\_BIND\_ADDR**: (optional) ``ip:port`` to listen on for Raft connections. If not specified the ``self_addr`` will be used. - **PATRONI\_RAFT\_PARTNER\_ADDRS**: list of other Patroni nodes in the cluster in format ``"'ip1:port1','ip2:port2'"``. It is important to quote every single entity! - **PATRONI\_RAFT\_DATA\_DIR**: directory where to store Raft log and snapshot. If not specified the current working directory is used. - **PATRONI\_RAFT\_PASSWORD**: (optional) Encrypt Raft traffic with a specified password, requires ``cryptography`` python module. PostgreSQL ---------- - **PATRONI\_POSTGRESQL\_LISTEN**: IP address + port that Postgres listens to. Multiple comma-separated addresses are permitted, as long as the port component is appended after to the last one with a colon, i.e. ``listen: 127.0.0.1,127.0.0.2:5432``. 
Patroni will use the first address from this list to establish local connections to the PostgreSQL node. - **PATRONI\_POSTGRESQL\_CONNECT\_ADDRESS**: IP address + port through which Postgres is accessible from other nodes and applications. - **PATRONI\_POSTGRESQL\_PROXY\_ADDRESS**: IP address + port through which a connection pool (e.g. pgbouncer) running next to Postgres is accessible. The value is written to the member key in DCS as ``proxy_url`` and could be used/useful for service discovery. - **PATRONI\_POSTGRESQL\_DATA\_DIR**: The location of the Postgres data directory, either existing or to be initialized by Patroni. - **PATRONI\_POSTGRESQL\_CONFIG\_DIR**: The location of the Postgres configuration directory, defaults to the data directory. Must be writable by Patroni. - **PATRONI\_POSTGRESQL\_BIN_DIR**: Path to PostgreSQL binaries. (pg_ctl, initdb, pg_controldata, pg_basebackup, postgres, pg_isready, pg_rewind) The default value is an empty string meaning that PATH environment variable will be used to find the executables. - **PATRONI\_POSTGRESQL\_BIN\_PG\_CTL**: (optional) Custom name for ``pg_ctl`` binary. - **PATRONI\_POSTGRESQL\_BIN\_INITDB**: (optional) Custom name for ``initdb`` binary. - **PATRONI\_POSTGRESQL\_BIN\_PG\_CONTROLDATA**: (optional) Custom name for ``pg_controldata`` binary. - **PATRONI\_POSTGRESQL\_BIN\_PG\_BASEBACKUP**: (optional) Custom name for ``pg_basebackup`` binary. - **PATRONI\_POSTGRESQL\_BIN\_POSTGRES**: (optional) Custom name for ``postgres`` binary. - **PATRONI\_POSTGRESQL\_BIN\_IS\_READY**: (optional) Custom name for ``pg_isready`` binary. - **PATRONI\_POSTGRESQL\_BIN\_PG\_REWIND**: (optional) Custom name for ``pg_rewind`` binary. - **PATRONI\_POSTGRESQL\_PGPASS**: path to the `.pgpass `__ password file. Patroni creates this file before executing pg\_basebackup and under some other circumstances. The location must be writable by Patroni. 
- **PATRONI\_REPLICATION\_USERNAME**: replication username; the user will be created during initialization. Replicas will use this user to access the replication source via streaming replication - **PATRONI\_REPLICATION\_PASSWORD**: replication password; the user will be created during initialization. - **PATRONI\_REPLICATION\_SSLMODE**: (optional) maps to the `sslmode `__ connection parameter, which allows a client to specify the type of TLS negotiation mode with the server. For more information on how each mode works, please visit the `PostgreSQL documentation `__. The default mode is ``prefer``. - **PATRONI\_REPLICATION\_SSLKEY**: (optional) maps to the `sslkey `__ connection parameter, which specifies the location of the secret key used with the client's certificate. - **PATRONI\_REPLICATION\_SSLPASSWORD**: (optional) maps to the `sslpassword `__ connection parameter, which specifies the password for the secret key specified in ``PATRONI_REPLICATION_SSLKEY``. - **PATRONI\_REPLICATION\_SSLCERT**: (optional) maps to the `sslcert `__ connection parameter, which specifies the location of the client certificate. - **PATRONI\_REPLICATION\_SSLROOTCERT**: (optional) maps to the `sslrootcert `__ connection parameter, which specifies the location of a file containing one ore more certificate authorities (CA) certificates that the client will use to verify a server's certificate. - **PATRONI\_REPLICATION\_SSLCRL**: (optional) maps to the `sslcrl `__ connection parameter, which specifies the location of a file containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list. - **PATRONI\_REPLICATION\_SSLCRLDIR**: (optional) maps to the `sslcrldir `__ connection parameter, which specifies the location of a directory with files containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list. 
- **PATRONI\_REPLICATION\_GSSENCMODE**: (optional) maps to the `gssencmode `__ connection parameter, which determines whether or with what priority a secure GSS TCP/IP connection will be negotiated with the server - **PATRONI\_REPLICATION\_CHANNEL\_BINDING**: (optional) maps to the `channel_binding `__ connection parameter, which controls the client's use of channel binding. - **PATRONI\_SUPERUSER\_USERNAME**: name for the superuser, set during initialization (initdb) and later used by Patroni to connect to the postgres. Also this user is used by pg_rewind. - **PATRONI\_SUPERUSER\_PASSWORD**: password for the superuser, set during initialization (initdb). - **PATRONI\_SUPERUSER\_SSLMODE**: (optional) maps to the `sslmode `__ connection parameter, which allows a client to specify the type of TLS negotiation mode with the server. For more information on how each mode works, please visit the `PostgreSQL documentation `__. The default mode is ``prefer``. - **PATRONI\_SUPERUSER\_SSLKEY**: (optional) maps to the `sslkey `__ connection parameter, which specifies the location of the secret key used with the client's certificate. - **PATRONI\_SUPERUSER\_SSLPASSWORD**: (optional) maps to the `sslpassword `__ connection parameter, which specifies the password for the secret key specified in ``PATRONI_SUPERUSER_SSLKEY``. - **PATRONI\_SUPERUSER\_SSLCERT**: (optional) maps to the `sslcert `__ connection parameter, which specifies the location of the client certificate. - **PATRONI\_SUPERUSER\_SSLROOTCERT**: (optional) maps to the `sslrootcert `__ connection parameter, which specifies the location of a file containing one ore more certificate authorities (CA) certificates that the client will use to verify a server's certificate. - **PATRONI\_SUPERUSER\_SSLCRL**: (optional) maps to the `sslcrl `__ connection parameter, which specifies the location of a file containing a certificate revocation list. 
A client will reject connecting to any server that has a certificate present in this list. - **PATRONI\_SUPERUSER\_SSLCRLDIR**: (optional) maps to the `sslcrldir `__ connection parameter, which specifies the location of a directory with files containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list. - **PATRONI\_SUPERUSER\_GSSENCMODE**: (optional) maps to the `gssencmode `__ connection parameter, which determines whether or with what priority a secure GSS TCP/IP connection will be negotiated with the server - **PATRONI\_SUPERUSER\_CHANNEL\_BINDING**: (optional) maps to the `channel_binding `__ connection parameter, which controls the client's use of channel binding. - **PATRONI\_REWIND\_USERNAME**: (optional) name for the user for ``pg_rewind``; the user will be created during initialization of postgres 11+ and all necessary `permissions `__ will be granted. - **PATRONI\_REWIND\_PASSWORD**: (optional) password for the user for ``pg_rewind``; the user will be created during initialization. - **PATRONI\_REWIND\_SSLMODE**: (optional) maps to the `sslmode `__ connection parameter, which allows a client to specify the type of TLS negotiation mode with the server. For more information on how each mode works, please visit the `PostgreSQL documentation `__. The default mode is ``prefer``. - **PATRONI\_REWIND\_SSLKEY**: (optional) maps to the `sslkey `__ connection parameter, which specifies the location of the secret key used with the client's certificate. - **PATRONI\_REWIND\_SSLPASSWORD**: (optional) maps to the `sslpassword `__ connection parameter, which specifies the password for the secret key specified in ``PATRONI_REWIND_SSLKEY``. - **PATRONI\_REWIND\_SSLCERT**: (optional) maps to the `sslcert `__ connection parameter, which specifies the location of the client certificate. 
- **PATRONI\_REWIND\_SSLROOTCERT**: (optional) maps to the `sslrootcert `__ connection parameter, which specifies the location of a file containing one ore more certificate authorities (CA) certificates that the client will use to verify a server's certificate. - **PATRONI\_REWIND\_SSLCRL**: (optional) maps to the `sslcrl `__ connection parameter, which specifies the location of a file containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list. - **PATRONI\_REWIND\_SSLCRLDIR**: (optional) maps to the `sslcrldir `__ connection parameter, which specifies the location of a directory with files containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list. - **PATRONI\_REWIND\_GSSENCMODE**: (optional) maps to the `gssencmode `__ connection parameter, which determines whether or with what priority a secure GSS TCP/IP connection will be negotiated with the server - **PATRONI\_REWIND\_CHANNEL\_BINDING**: (optional) maps to the `channel_binding `__ connection parameter, which controls the client's use of channel binding. REST API -------- - **PATRONI\_RESTAPI\_CONNECT\_ADDRESS**: IP address and port to access the REST API. - **PATRONI\_RESTAPI\_LISTEN**: IP address and port that Patroni will listen to, to provide health-check information for HAProxy. - **PATRONI\_RESTAPI\_USERNAME**: Basic-auth username to protect unsafe REST API endpoints. - **PATRONI\_RESTAPI\_PASSWORD**: Basic-auth password to protect unsafe REST API endpoints. - **PATRONI\_RESTAPI\_CERTFILE**: Specifies the file with the certificate in the PEM format. If the certfile is not specified or is left empty, the API server will work without SSL. - **PATRONI\_RESTAPI\_KEYFILE**: Specifies the file with the secret key in the PEM format. - **PATRONI\_RESTAPI\_KEYFILE\_PASSWORD**: Specifies a password for decrypting the keyfile. 
- **PATRONI\_RESTAPI\_CAFILE**: Specifies the file with the CA_BUNDLE with certificates of trusted CAs to use while verifying client certs. - **PATRONI\_RESTAPI\_CIPHERS**: (optional) Specifies the permitted cipher suites (e.g. "ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES128-GCM-SHA256:!SSLv1:!SSLv2:!SSLv3:!TLSv1:!TLSv1.1") - **PATRONI\_RESTAPI\_VERIFY\_CLIENT**: ``none`` (default), ``optional`` or ``required``. When ``none`` REST API will not check client certificates. When ``required`` client certificates are required for all REST API calls. When ``optional`` client certificates are required for all unsafe REST API endpoints. When ``required`` is used, then client authentication succeeds, if the certificate signature verification succeeds. For ``optional`` the client cert will only be checked for ``PUT``, ``POST``, ``PATCH``, and ``DELETE`` requests. - **PATRONI\_RESTAPI\_ALLOWLIST**: (optional): Specifies the set of hosts that are allowed to call unsafe REST API endpoints. The single element could be a host name, an IP address or a network address using CIDR notation. By default ``allow all`` is used. In case if ``allowlist`` or ``allowlist_include_members`` are set, anything that is not included is rejected. - **PATRONI\_RESTAPI\_ALLOWLIST\_INCLUDE\_MEMBERS**: (optional): If set to ``true`` it allows accessing unsafe REST API endpoints from other cluster members registered in DCS (IP address or hostname is taken from the members ``api_url``). Be careful, it might happen that OS will use a different IP for outgoing connections. - **PATRONI\_RESTAPI\_HTTP\_EXTRA\_HEADERS**: (optional) HTTP headers let the REST API server pass additional information with an HTTP response. - **PATRONI\_RESTAPI\_HTTPS\_EXTRA\_HEADERS**: (optional) HTTPS headers let the REST API server pass additional information with an HTTP response when TLS is enabled. This will also pass additional information set in ``http_extra_headers``. 
- **PATRONI\_RESTAPI\_REQUEST\_QUEUE\_SIZE**: (optional): Sets request queue size for TCP socket used by Patroni REST API. Once the queue is full, further requests get a "Connection denied" error. The default value is 5. .. warning:: - The ``PATRONI_RESTAPI_CONNECT_ADDRESS`` must be accessible from all nodes of a given Patroni cluster. Internally Patroni is using it during the leader race to find nodes with minimal replication lag. - If you enabled client certificates validation (``PATRONI_RESTAPI_VERIFY_CLIENT`` is set to ``required``), you also **must** provide **valid client certificates** in the ``PATRONI_CTL_CERTFILE``, ``PATRONI_CTL_KEYFILE``, ``PATRONI_CTL_KEYFILE_PASSWORD``. If not provided, Patroni will not work correctly. CTL --- - **PATRONICTL\_CONFIG\_FILE**: (optional) location of the configuration file. - **PATRONI\_CTL\_USERNAME**: (optional) Basic-auth username for accessing protected REST API endpoints. If not provided :ref:`patronictl` will use the value provided for REST API "username" parameter. - **PATRONI\_CTL\_PASSWORD**: (optional) Basic-auth password for accessing protected REST API endpoints. If not provided :ref:`patronictl` will use the value provided for REST API "password" parameter. - **PATRONI\_CTL\_INSECURE**: (optional) Allow connections to REST API without verifying SSL certs. - **PATRONI\_CTL\_CACERT**: (optional) Specifies the file with the CA_BUNDLE file or directory with certificates of trusted CAs to use while verifying REST API SSL certs. If not provided :ref:`patronictl` will use the value provided for REST API "cafile" parameter. - **PATRONI\_CTL\_CERTFILE**: (optional) Specifies the file with the client certificate in the PEM format. - **PATRONI\_CTL\_KEYFILE**: (optional) Specifies the file with the client secret key in the PEM format. - **PATRONI\_CTL\_KEYFILE\_PASSWORD**: (optional) Specifies a password for decrypting the client keyfile. 
patroni-3.2.2/docs/Makefile000066400000000000000000000011341455170150700155660ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = Patroni SOURCEDIR = . BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) patroni-3.2.2/docs/README.rst000066400000000000000000000124631455170150700156240ustar00rootroot00000000000000.. _readme: ============ Introduction ============ Patroni is a template for high availability (HA) PostgreSQL solutions using Python. Patroni originated as a fork of `Governor `__, the project from Compose. It includes plenty of new features. For an example of a Docker-based deployment with Patroni, see `Spilo `__, currently in use at Zalando. For additional background info, see: * `PostgreSQL HA with Kubernetes and Patroni `__, talk by Josh Berkus at KubeCon 2016 (video) * `Feb. 2016 Zalando Tech blog post `__ Development Status ------------------ Patroni is in active development and accepts contributions. See our :ref:`Contributing ` section below for more details. We report new releases information :ref:`here `. Technical Requirements/Installation ----------------------------------- Go :ref:`here ` for guidance on installing and upgrading Patroni on various platforms. .. _running_configuring: Planning the Number of PostgreSQL Nodes --------------------------------------- Patroni/PostgreSQL nodes are decoupled from DCS nodes (except when Patroni implements RAFT on its own) and therefore there is no requirement on the minimal number of nodes. 
Running a cluster consisting of one primary and one standby is perfectly fine. You can add more standby nodes later. Running and Configuring ----------------------- The following section assumes Patroni repository as being cloned from https://github.com/zalando/patroni. Namely, you will need example configuration files `postgres0.yml` and `postgres1.yml`. If you installed Patroni with pip, you can obtain those files from the git repository and replace `./patroni.py` below with `patroni` command. To get started, do the following from different terminals: :: > etcd --data-dir=data/etcd --enable-v2=true > ./patroni.py postgres0.yml > ./patroni.py postgres1.yml You will then see a high-availability cluster start up. Test different settings in the YAML files to see how the cluster's behavior changes. Kill some of the components to see how the system behaves. Add more ``postgres*.yml`` files to create an even larger cluster. Patroni provides an `HAProxy `__ configuration, which will give your application a single endpoint for connecting to the cluster's leader. To configure, run: :: > haproxy -f haproxy.cfg :: > psql --host 127.0.0.1 --port 5000 postgres YAML Configuration ------------------ Go :ref:`here ` for comprehensive information about settings for etcd, consul, and ZooKeeper. And for an example, see `postgres0.yml `__. Environment Configuration ------------------------- Go :ref:`here ` for comprehensive information about configuring(overriding) settings via environment variables. Replication Choices ------------------- Patroni uses Postgres' streaming replication, which is asynchronous by default. Patroni's asynchronous replication configuration allows for ``maximum_lag_on_failover`` settings. This setting ensures failover will not occur if a follower is more than a certain number of bytes behind the leader. This setting should be increased or decreased based on business requirements. 
It's also possible to use synchronous replication for better durability guarantees. See :ref:`replication modes documentation ` for details. Applications Should Not Use Superusers -------------------------------------- When connecting from an application, always use a non-superuser. Patroni requires access to the database to function properly. By using a superuser from an application, you can potentially use the entire connection pool, including the connections reserved for superusers, with the ``superuser_reserved_connections`` setting. If Patroni cannot access the Primary because the connection pool is full, behavior will be undesirable. Testing Your HA Solution -------------------------------------- Testing an HA solution is a time consuming process, with many variables. This is particularly true considering a cross-platform application. You need a trained system administrator or a consultant to do this work. It is not something we can cover in depth in the documentation. That said, here are some pieces of your infrastructure you should be sure to test: * Network (the network in front of your system as well as the NICs [physical or virtual] themselves) * Disk IO * file limits (nofile in Linux) * RAM. Even if you have oomkiller turned off, the unavailability of RAM could cause issues. * CPU * Virtualization Contention (overcommitting the hypervisor) * Any cgroup limitation (likely to be related to the above) * ``kill -9`` of any postgres process (except postmaster!). This is a decent simulation of a segfault. One thing that you should not do is run ``kill -9`` on a postmaster process. This is because doing so does not mimic any real life scenario. If you are concerned your infrastructure is insecure and an attacker could run ``kill -9``, no amount of HA process is going to fix that. The attacker will simply kill the process again, or cause chaos in another way. 
patroni-3.2.2/docs/_static/000077500000000000000000000000001455170150700155555ustar00rootroot00000000000000patroni-3.2.2/docs/_static/custom.css000066400000000000000000000000371455170150700176010ustar00rootroot00000000000000li { margin-bottom: 0.5em } patroni-3.2.2/docs/_static/multi-dc-asynchronous-replication.drawio000066400000000000000000000047151455170150700255510ustar00rootroot000000000000007Vxtb9s2EP41BrYPNfTmt4+Jk2bFOixrihXYF4O2aEkNLaoUZTv99SMlUhZF+i2RE8dVEsDiiTxKd88deXeMO+54sb4jIAn/wj5EHcfy1x33puM4tud67INTngrKkLc4ISCRLzptCA/RTyiIlqBmkQ9TpSPFGNEoUYkzHMdwRhUaIASv1G5zjNRZExBAjfAwA0infot8Ggqq3R9tbvwBoyCk8v0GxY0FkJ3Fm6Qh8PGqQnJvO+6YYEyLq8V6DBEXnpRLMe7jlrvlgxEY00MG/L3474v99d/Hb+jnn596d99vsjj7ILgsAcrEC9+MhYJS+iSFkOAoprkge9fsj80ztjo9dmfMW12nVyPU2wOVYOstzkMl1NsDlWDX2du1+e36A1YIWkthb9XmtyoPyP7ca5xRFMVwXELOYsSAAD9iqhhjhAmjxThm0rsO6QKxls0uV2FE4UMCZlyqK2YujDbHMRWgtx3ZFoLnXBmsKWBzEcEj1wQkt0tYKKTogxBI0mhajiJwlpE0WsIvMC2YcyoDYMKvF+uA22oXrFKvGxCcJfnjf2JzGe9O2OVkhnDmcyaU4EcoX7LjuOz3Iwfc9TxCqPbyS0hoxGzpCkUB500xnwqIFoJzyjkyiURx8Dlv3biWkIJpCh+kIfTF6+j4l2Bms8J1hSTs4Q7iBaTkiXURd3vSNoVz8kRztbF0T9LCipF7chwQ3iUoWW8MkF0IGzzCHh3NHu8Bk3gcaTZpELemm95VfzzsVwVnb9VKHXk1HZSsTCiugFzXyk6/c7CqbHvAzXq3shyrpyur7Ni4slxdWXd8TJxSEDP5OH3EAT4l7CrIoc7o/vRJ02X6COksFII3epdtFrHF6xxmpYw+z3/qpiUR8hlMIbrHaUSj3DdMMaV4sdewZ5D7KBUX+xwdSJPibefRGvrbvBWBKc7IDBa+ivm51OS1/OlE6mAiRX5CZI5UJzLQcdk3+JD+qVDpHYtKAoHPOhCYIKbTFpyvB04u+YmU+wkR2lMRar81RHstRFuIKhB13DODaF+D6O3X8Q0PNFGWcu04lh4nKTisKE8BR76BSmthgIoqK/8x4bDEWz0k61pWHmR1+24t+BLxVY06MlKLOK3Wc7SF8SAfze4bmNg1mjOs9c0Dqb12olmE2XDqWH/MppDEkIm5GxVIT2R4wxTkn8zPDlUQu44O4qEpnBieCMQDDcQtZFvIViHbPzPEDjWAQj+AcqHDhIY4wDFAtxvqNcFZ7JdY3fT5jLmkczR/h5Q+ieUTZBS/JGYtVtAd/URmkAISwF38xBLDX3CnqghEgEZLNSFpkrwYes/trLK01r2S56ksigcVo2r6Kx/j+SodtU6odUI7nRDT23l5IVl8eNduaPBWbuhlorcvQPT9A0U/Oi/R6475ckU/OC/R66nkG74WFHkQtujFil76PzJeNSyWxA9iTbziYiQwF6nsIPMn7JMtgqPiChWUjwVb2aEt+bUlv03Jb4ZJggmgcOIDCiblPmJ7iel05T+9itVQ+c9Ttx1vX/2zDbn7yyz/ucfqyrbPrfpnt1nsS89iH49Sr16lfvNEtq0nAVuY/uIwdZzzg+lQg6lWcNFDwzZxd
FGJo+P97blVXOw229mC9p3VXJyzznYK8e5N/JSnw/dlfuRKc+l1lzIebl1R64reS+XFgNF36IvkLuANfNHLpO9ehPSHB0pfyvFcpO/9UtKXRvL60n+kftr/Z/jtGi7DW39l/0juoeEwv8yM+NGyUkepJUsq9RRDv5xkKNzwhMKHIk/Pyzb9ZN3ZUrY5fjqVxGc65BFsd88zHMzIa4pRrylG+8R7MKNBU4yGTTEaNcSIeYCGGNlNMXKaYtQUsp1tyL6vWGXBTDPWX5qsuKV6BJIHDPJfax11bapvk+cIr2YhILTLq5JTkPIV0FSROtmG2altmAc9bb/slrnV6o6510Di1Lhu6QfVj1x87KYMbSsjY73hQLic2as0xigh0QJw9e+UhnHhf4aVMXRT1bT2BqqLyPeLHSY/UAA2Jw3UQJ5HxXxTKQ4d5Far5ABEkcdQr65UWdjW95RVuUGt2OHqUe7IFOWeymbPZKNvOFO1a2u8d0fvntWGXi/PS2MJ3WearjVDIE2VQTSiCL6Cw9l6BixntBKo5axiTBYAGZk9UALBIooD1u1LUWbME1iO9RtIn+JZSHCMs/T3ildRD4nt80FcsltcEEdFnjY7nR/SEb7L+jT/UX6JiJikU/2eDpNfsbqW5wwV1yJt4Lm5Y9kFz+cpPDItzJqbbxMpum++k8W9/R8=patroni-3.2.2/docs/_static/multi-dc-asynchronous-replication.png000066400000000000000000001172011455170150700250430ustar00rootroot00000000000000‰PNG  IHDR™¥pq2Ù ÐtEXtmxfile%3Cmxfile%20host%3D%22app.diagrams.net%22%20modified%3D%222023-03-13T14%3A29%3A14.439Z%22%20agent%3D%225.0%20(X11%3B%20Ubuntu)%22%20etag%3D%22OufOFJhmd-sLSoEhEUfs%22%20version%3D%2221.0.6%22%20type%3D%22device%22%3E%3Cdiagram%20name%3D%22Page-1%22%20id%3D%22Xu3tU9JEMeQEUPilRV_D%22%3E7Vxtb9s2EP41BrYPNfTmt4%2BJk2bFOixrihXYF4O2aEkNLaoUZTv99SMlUhZF%2Bi2RE8dVEsDiiTxKd88deXeMO%2B54sb4jIAn%2Fwj5EHcfy1x33puM4tud67INTngrKkLc4ISCRLzptCA%2FRTyiIlqBmkQ9TpSPFGNEoUYkzHMdwRhUaIASv1G5zjNRZExBAjfAwA0infot8Ggqq3R9tbvwBoyCk8v0GxY0FkJ3Fm6Qh8PGqQnJvO%2B6YYEyLq8V6DBEXnpRLMe7jlrvlgxEY00MG%2FL3474v99d%2FHb%2Bjnn596d99vsjj7ILgsAcrEC9%2BMhYJS%2BiSFkOAoprkge9fsj80ztjo9dmfMW12nVyPU2wOVYOstzkMl1NsDlWDX2du1%2Be36A1YIWkthb9XmtyoPyP7ca5xRFMVwXELOYsSAAD9iqhhjhAmjxThm0rsO6QKxls0uV2FE4UMCZlyqK2YujDbHMRWgtx3ZFoLnXBmsKWBzEcEj1wQkt0tYKKTogxBI0mhajiJwlpE0WsIvMC2YcyoDYMKvF%2BuA22oXrFKvGxCcJfnjf2JzGe9O2OVkhnDmcyaU4EcoX7LjuOz3Iwfc9TxCqPbyS0hoxGzpCkUB500xnwqIFoJzyjkyiURx8Dlv3biWkIJpCh%2BkIfTF6%2Bj4l2Bms8J1hSTs4Q7iBaTkiXURd3vSNoVz8kRztbF0T9LCipF7chwQ3iUoWW8MkF0IGzzCHh3NHu8Bk3gcaTZpELemm95VfzzsVwVnb9VKHXk1HZSsTCiugFzXyk6%2Fc7CqbHvAzXq3shyrpyur7Ni4slxdWXd8TJxSEDP5OH3EA
T4l7CrIoc7o%2FvRJ02X6COksFII3epdtFrHF6xxmpYw%2Bz3%2FqpiUR8hlMIbrHaUSj3DdMMaV4sdewZ5D7KBUX%2BxwdSJPibefRGvrbvBWBKc7IDBa%2Bivm51OS1%2FOlE6mAiRX5CZI5UJzLQcdk3%2BJD%2BqVDpHYtKAoHPOhCYIKbTFpyvB04u%2BYmU%2BwkR2lMRar81RHstRFuIKhB13DODaF%2BD6O3X8Q0PNFGWcu04lh4nKTisKE8BR76BSmthgIoqK%2F8x4bDEWz0k61pWHmR1%2B24t%2BBLxVY06MlKLOK3Wc7SF8SAfze4bmNg1mjOs9c0Dqb12olmE2XDqWH%2FMppDEkIm5GxVIT2R4wxTkn8zPDlUQu44O4qEpnBieCMQDDcQtZFvIViHbPzPEDjWAQj%2BAcqHDhIY4wDFAtxvqNcFZ7JdY3fT5jLmkczR%2Fh5Q%2BieUTZBS%2FJGYtVtAd%2FURmkAISwF38xBLDX3CnqghEgEZLNSFpkrwYes%2FtrLK01r2S56ksigcVo2r6Kx%2Fj%2BSodtU6odUI7nRDT23l5IVl8eNduaPBWbuhlorcvQPT9A0U%2FOi%2FR6475ckU%2FOC%2FR66nkG74WFHkQtujFil76PzJeNSyWxA9iTbziYiQwF6nsIPMn7JMtgqPiChWUjwVb2aEt%2BbUlv03Jb4ZJggmgcOIDCiblPmJ7iel05T%2B9itVQ%2Bc9Ttx1vX%2F2zDbn7yyz%2FucfqyrbPrfpnt1nsS89iH49Sr16lfvNEtq0nAVuY%2FuIwdZzzg%2BlQg6lWcNFDwzZxdFGJo%2BP97blVXOw229mC9p3VXJyzznYK8e5N%2FJSnw%2FdlfuRKc%2Bl1lzIebl1R64reS%2BXFgNF36IvkLuANfNHLpO9ehPSHB0pfyvFcpO%2F9UtKXRvL60n%2Bkftr%2FZ%2FjtGi7DW39l%2F0juoeEwv8yM%2BNGyUkepJUsq9RRDv5xkKNzwhMKHIk%2FPyzb9ZN3ZUrY5fjqVxGc65BFsd88zHMzIa4pRrylG%2B8R7MKNBU4yGTTEaNcSIeYCGGNlNMXKaYtQUsp1tyL6vWGXBTDPWX5qsuKV6BJIHDPJfax11bapvk%2BcIr2YhILTLq5JTkPIV0FSROtmG2altmAc9bb%2FslrnV6o6510Di1Lhu6QfVj1x87KYMbSsjY73hQLic2as0xigh0QJw9e%2BUhnHhf4aVMXRT1bT2BqqLyPeLHSY%2FUAA2Jw3UQJ5HxXxTKQ4d5Far5ABEkcdQr65UWdjW95RVuUGt2OHqUe7IFOWeymbPZKNvOFO1a2u8d0fvntWGXi%2FPS2MJ3WearjVDIE2VQTSiCL6Cw9l6BixntBKo5axiTBYAGZk9UALBIooD1u1LUWbME1iO9RtIn%2BJZSHCMs%2FT3ildRD4nt80FcsltcEEdFnjY7nR%2FSEb7L%2BjT%2FUX6JiJikU%2F2eDpNfsbqW5wwV1yJt4Lm5Y9kFz%2BcpPDItzJqbbxMpum%2B%2Bk8W9%2FR8%3D%3C%2Fdiagram%3E%3C%2Fmxfile%3E°c& IDATx^ì ¼MÕûÆ_ÒhJ“1”1õ/*Q‘!* E(”B3fžçy.…È ¡Ì… †P”)M¦ÒÏ?Kû:®ëžsöÙûìµ÷~VŸûéºgßw­³÷³ßw­ê¹Æ½Ï¦gÓY£z§Ûpv¬H€HÀ‹¦ŽxCÛnç¹ë¡8\ÏJªT¼k; Ø1 ð)T5 ‘éôتT(!U/ét3¬ŸH€H€ ÔjÒGô™¥ è¬Áš4¨+ÍÕs¶ÖN$@$@!ò6®o™œ$@$@~$@‘)B‘éÇ™Í1‘ €Þ(2õ¶{G$@$ŠLŠÌ¦‹’ X$•ÈÚõUùå÷c4uC¦ŒÒ¼ë›a›g¸lXDÌ@$@$`3ŠLŠL›§«# ˆ€@T"³CÓZÒkøÔ ªMîoɵK‘5˜…H€HÀV™™¶N(VF$@$K"ó†Lä†ë¯•»ŠÌˆ83 € (2)2]˜vl’H€OÀ’È,˜7§Ê—Sæ,ZC‘ø)D$@$ 
/?ˆÌ‚ùóHù2çNg?~â¤,]¾Zú)bè<ø'bTÌH$@$`K"󚫯”k®¾J~ùí˜m"óå6ƒå¯¿O«£Ü/»ì2É•ý&©^±Œ³fúçŸ32óÃU²ñËoå豓’õ¦ë¤Ú“I‘Ûó^€ãë{Õ¿ï(Û&L¬†H€HÀ‹¼.2;´j,Ò§“9 +üø¢qÒÔ™Æß>ŠÈ$áDf‘’OÈü©Þ¥™&Íer[þ¼Òªé+R¼h‘ÄúO:-CF—¥+ÖÊ‘#¿È­¹o–毾$eº_åùaÿAiß­¿|³c—\ݵҺiyôa¼”‰H€H ˆ,‰L'Âe!2»´xArd½Aþ6.f›¾Ü% ï}$›Õ’Ü92+Û zk–¤1ès•ÊȵÓÉÖm{dÜÔ…†Ð­)¹þËsð§_eøÄ¹R¾ä=òp‰óÈ —c&  ð²È¬S³š2ßäi³.2ãèÁ=%Áš6m kâHDæ{“FJ¾<·Èþ%Ë–¯‘.}†ÈÔñÃäö‚ùTý šµ—Ë/O#mš7”›n¸^V}º^Úué'SÆ •BFžGŸ~A*>^^ê¿P]>^¹VZuì-ÏŸ*Y³Ü¶Ì@$@$à?aEf色Ûwý ³®–ÐpÙª—”Ûþó6¦tÒl¸ƒBE¦‰ù½VÊO?ÿ.MêUx'ÇMýPwidx:S'Zbîâµr…qá{âábòÑÊòÉÚÍòëï'”¥Èôß„åˆH€H ^™ó§—Š5ê';Üô†GsÁŒñrüøIõùìù‹“£ø,‘i66xäxÙ÷ã~Ö¯«¬]·QÞ0¼”/˜&—§I“ØŸQo½-W\y…T~â)ýDuùòÓÅ* ©’ÑïÆ¯Ô‘òeÏ…ù2‘ ‹@X‘™ÜÁ>¡á²¡¸R:ÈŠÈܱçGy ²sCàüÛ§}¡Zù°™ð¾!|sQd†%Å $@$ào^™÷Ýs—»·ˆŒ;é’*voaÙ¶s·œ0öi³9lL‚ìøvÏEù­ˆÌÏ¿øRÚuí§¼‘œ'pÚÎm›&Û—3gþ•ß~?*7Ýx½úûFK=^]ÎL 'ÓßË‹£# KˆXdÖ®RN¦ÌY¦*Êiì—Ì•=³¬Þð•ú·ù™Ý"óúÚ¡‚LÔJFMž'7g»I*–?·ÿ#¥D‘Ž?' `ð¢ÈL—.­4kXO‰µ”Df¨Ë•~PªV¬l­‘ùÝÞ¤òs¯È—Ÿ-–oôùn•†/Ö ;ivíù^š·ë.¥K—ÖÍ„ÍÏ $@$@þ$±È ¡á²Àb~f·ÈÜix2ÇþçÉœ>…œ>ýÏEžÌÃ?ÿ&?8"÷.˜h!ŠLNVŽŠH€¢%à5‘ 9fH/%.×oÜÕpBÛÑ8(hýÆÍ dEdnÜü¥´5ö\“9`ØXã`¾Sy2÷îÛ/;ví‘ÇÊ•’ÿýï2fâT#dw¶¼Þ¸¾T¯òdT}gf  ˆXdF2l»Eæ,ã$ÙCG~S{27½[& 2Bgqú™Þž½Tþüó”4|þü"3k1 øŸ€×Df•§UF‰ô䨤„ÐDèlí—›'~dEdys‚ìÝ÷£Ú“ùɪO¥kŸ¡²ÌœW\~yb½=ú0BuOHÿí¥÷ Q²õ«í2jPwã=Ú×ùbq„$@$@)ˆXdÆ3\ö´ñªˆÊñïâäØó§Ëö5]®2¨ùtYɘ>­:vÜ´Õ ´yreK(E&g= xMdBÂÉ©±—²0Df£×;Z™KˆÊ=ª“cÍÓeë6je¼ºìjy£å«†ˆÌ¤N Åa@86[–ÌRöÉç”Ítm†ÄvÓ‡¥N}þ >ÎH  àˆXdÆ#\ö”›ÚxOÞÕ…}ŸIß“‰÷hΘ¿\¾0èÉ?þ’l™¯—ªO”¼è=™™Á™À) ¤DÀk"¯-9pð,3ÞGi5AFëÉüû¯¿%•!S§N%óå¹è=™8øgàð·ÔëIŽ;¡Þ“٬ыê=™«>Ý /¾Úú¢îŽ6Â~Ë—)au,G$@$àa‹ÌHÆK¸l$õ3 DCÀk"3{¶,Ò¯[;iØ¢ƒœ<ùG4CUyn{[|ÒkàÈIJáÂe£n„H€H€H ˆE¦ÓžLZŠH€H€ì&à5‘‰ñßV ¯š8]6Ú„W˜ô0â‚b™ÑRd~  X PdÆJåI€H€´%àE‘i7LŠL»‰²>  p"™á*Âç —„ó Ä‹E¦Ef¼fÛ! 
0 „™C»¾*?ÿv,"b7^—Qšw}3Ù¼U*”ª—Œ¨f"  ;PdRdÚ1X @tŠÌ誻tnŠL»H²  H PdRdF:W˜H€HÀ>™ö±dM$@$@š È¤ÈÔlJ²;$@$™03I$@Á$@‘I‘Ì™ÏQ“ €»”È|®qï³U=“N¦Ûòå’Bùr:Ùë&  è/2’¦ ë9jµb÷–âE‹8Ú+'  P‰žÌ©#Þ   ðÝE&.»·¬ôs†H€H€(29H€H€|K€"Ó·¦åÀH€H€4&@‘©±qØ5  ØPdÆÆ¥I€H€HÀ ŠL+ÔX†H€HÀ(2=a&v’H€HÀg(2}fP‡H€Hà<ŠLÎ  ˆ?ŠÌø3g‹$@$@q"@‘'Ðl†H€H€BPdr: ø–E¦oMË‘ hL€"Scã°k$@$@± ÈŒK“ €™V¨± €'PdzÂLì$ €ÏPdúÌ  Ày™œ $@$@$™ñgÎI€H€âD€"3N Ù „ Èät  ð-ŠLßš–# ИE¦ÆÆa×H€H€b#@‘?–&  +(2­Pc  O Èô„™ØI  Ÿ Èô™A{˜&۾ݧFUõñ’R¥B õûœEkdöÂÕü;9p>$YÛwÿ šÔôÙ7‡c Èä\ˆ7|§ô6URý×ðÔo$vóÑLüû9ä@5uѱi-)”/g¼—+ÛsE¦ƒpݨZ÷*7˜°MH‰×Œ¿ç‡îöÅEx÷–•þ6BÀF‡‡ºgÏžUz™H€Âàš ÏÈ‹9(2½hµúŒ…jz/}64‡!À5ãVm*¥ÈÔÆé<™†Æ¤W&0ç@c%À5+A=ËSdêiöŠH€HÀ™6@d$@$@$%ŠÌ(1; €wPdzÇVì) €PdúÇ–j$ ýó™A9Ç pÍ8ŽØÕ(2]ÅÈÆúH³sÐ1àš‰žÆE)256Ž•®é~CeeL,CNàšq’®ûuën_üãþ±»<ÄÄn¢¬Ïï¸füiaŠLŸÙU÷*Ÿáæp|@€kÆFLaºÛ—"Óó7Ìþ³)Gä,®gùºU;E¦[äj—¡ö‚ÝèiÛ{œ¤N}îg©S§–Ynª•—ynN±±þ£ß“6žµ­CýÞœ!”ºGŠÜž×¶:YCÌý>(2ýnaýÆÇÐ?ûmÂk±ýLuª‘kF'kØ×ŠLûX²&À…­óÀI2qP+5º“ü%s¯•u_l“‘=›J*ómÛÉŒýùf}åaí.øäßÿýOR…R¥Tð÷íÿI®»6½¤OwIsH$à ŠLg¸²Vˆ'^‹ãI›m‘€=(2íáÈZ|J é… Ãüå÷ãÒ¬ó(Õ«©üóÏ™0}‘|·ï¤¾,µ<òÐ=ê=¥ÇΔÍ_ï–Yo^m^”Fí‡É+µž‰3Kß7êËñªrÿ"Yoº^^¬ñ˜äΑY–¬Ú$?8"{ AùËoÇ$ï-Ù¥Þ³*q9pÌLy¸dz2}:×8,gPd:Õµ’@< ðZOÚl‹ì!@‘iGmja¸¬½¦Hzaûó¯SòþGkeåº/etïf2nÚ‡rÙe—©ðYˆÂöý&ÊЮ¯ÊµÒJ¨'óå6ƒåîÿË'uŸyT®¸"´îñ–^»YÞžµTz´®«Âr‡ŒŸ-·ÜœUžyò!ŠL{M›X׌C`5©–"SC¨ ý³ßؼÛÏT§¹ft²†}}¡È´¥5é~C¥¤(:aî¹<ÍeªÂ]³ÜxÔ«þ˜Ê—SNœüS®ºò %48xDz›"=ZÕ•¬™¯¿HdvlZKrÞÊï8,ÃæÊCTš©uÏ·”§óûËÚÏ¿‘n-_P-]ý…4¼užy„"3 »E“•k&ZÞË«»}yð÷æT¸ó“p„¢ÿœ×âè™y©׌—¬y_)2#g剜ºßPybH'“ Ñ Ã#LvòÌä¸!6oÎv“ìÜó£t{ý…dEæ Î %ƒ±Ÿró7»eáÇ륃!:Í„C}ÊQ{2—º…"Ó‡óˆCrE¦{ìÙ2 ØE€×b»H²ˆŠÌø±fK$î¶fÃ×2íýOäš«¯Tûüõ÷iÙkì«ÄÁ=ƒÇÍ6¼–‡”¸Ä鲦ȆŒd'L_,‡ŽüjìñÌdœ û˜Ü’3‹:]–žLNvY[™Úš†#ˆ ðZ1*f$mPdjc {:ÂÐ?{8²–ààšñ·­)2ým_GÇÐ?­Â>éL€kFgëXïE¦uvZ–Ôý†JKhìT  pÍøÛüºÛ—ÿøoþñÿÙ”#r–׌³|ݪ"Ó-òµ«û •CÃfµ$`™×Œetž(¨»})2=1¢ê$o˜£ÂÅÌ$ \3þœ™>³+Cÿ|fPÇq\3Ž#vµŠLWñ²q†þÒìt ¸fb€§qQŠLî‘ ÄF€"36~,M$@$@VPdZ¡Æ2$@$@ž @‘é 3±“$@$@>#@‘é3ƒ2ôÏgåp'À5ã8bW Èt gè_ ÍÎAÇ@€k&x¥ÈÔØ8Vº¦û ••1± 8I€kÆIºî×­»}yðûsÄî𻉲>¿àšñ§…)2}fWÝo¨|†›Ãñ®1…!èn_ŠLÿÍ?Þ0ûϦ‘³¸fœåëVí™n‘w¨]†þ9–Õú–׌oM«F‘éoûê8:†þéhöIg\3:[Çzß(2­³cI  Í Pdjn vH€HÀ—(2}iVŠH€H€žLÎ  
p‡E¦;Ük•¡Ž¡eÅ>%À5ãSÃþ7,z2ým_GÇÐ?­Â>éL€kFgëXïE¦uvZ–Ôý†JKhìT  pÍøÛüºÛ—ÿøoþñÿÙ”#r–׌³|ݪ"Ó-òµ«û •CÃfµ$`™×Œetž(¨»})2=1¢ê$o˜£ÂÅÌ$ \3þœ™>³+Cÿ|fPÇq\3Ž#vµŠLWñ²q†þÒìt ¸fb€§qQŠLî‘ ÄF€"36~,M$@$@VPdZ¡Æ2$£GÊÖ­[UNü¾eËõûµ×^+… V¿g̘1ñ÷ªd ( PdF ŒÙI€H€HÀ™6@Ô© †þ¹c½{÷ʼyóäý÷ß—+VXêDîܹ¥téÒR¹re©T©’¥:X(z\3Ñ3óR ŠL/YË}eèŸ?ìÈQÄ×LüXdz%ŠÌxÒŽC[ºßPÅAÜšX¹r¥•øÈLšB½”¡ÞËP¯f¨·3iyˆMü”*UJ @™œ!À5ã W]jÕݾ<øG—™b_?xˆ‰},c©iÛŽ]râärüÄIÙ¾sw²UÝV ¯dHŸNÌÿÇÒËZ'À5cÎ%)2u¶Ž…¾é~CeaHÚ™ŸA´®ž"ÓŸvå¨l&ïb‹-Ô>¹råRžLˆËx$„â¢=xP‘ lçΫb"H™E&g ø‹Äe#äÄ"1[ÖÌRÜð:Âû/d¡‚ù,x¿!8·ïØ­Äë:Ã3zðÐOª.x9ëÕzFš6ŒÏußòX4"@‘©‘1Ø= À“X¦LÕ¹fÍšÉСC]é(<©ÅÝ·oŸšË—/w¥l”¼D€"ÓKÖb_IàÒày¬U¿YâþÊ*O=jx+Hñ¢EöîóÍ2gþ"™³à#ÕDl¿níb²Žu–“€f(253H¬Ýaè_¬/.Ë-·¨Y쉄GÑÍO*BølBBBܼ©nŽÙé¶¹fœ&ìný™îòbë ýsÆê˜Øwyß=wIÿo¡°çÃ`iñ|­ðpöì?B–­X«„悜n2PõsÍøÓÜ™>³«î7T^Ãmz1±ÿÒ|Ï¥ÛcÀ^Mì Å©³V_—âötjŸkF'kØßÝí˃ì·¹Û5òg,€µ‚ÐØU‹Þs¦jmؼ½šSÆ uÔƒAW|•…kÆWæL E¦Ïìªû •×pwîÜYzôè!õêÕ“‰'jÑ}x33eÊ$éÒ¥3ö¤œÐ¢O^î׌—­¾ïºÛ—"3¼ ½–ƒ7Ìö[ ¯#©X£¾qˆOåEtãÔW„ë>Uý%ur-E¦½6æš±—§.µQdêb ›úÁÐ?›@þW ö@Λ7Otòd"t!¼HgÏžµwÀ¬kÆßF§Èô·}uCÿì· öFÖ~¹¹ª᪣‡ôŒk¸ìÒOVKÏ#_B‘i¯¹fìå©Km™ºX‚ýÐ’@Æ eìØ±rÕUWÉ¡C‡´8ÑûB»uëF‘©åŒa§H€H€ì&*2ͺñZ¼^¯q"aæ†[eؘ„Dq‰p]œ8K‘éqÖé7™~³(Çc+PA‡waâDW7_‚W© t×LôdÚjnVF$@$ !SdâПªÆ‰²x…Iè;-‹…¼Â$»!­¼ÆÄ•ÛvîR á]šfB»MÖS1vE¦†s„]ÒE¦~6‰©G ý‹ ßE…M‘yå•WÊ©S§B§ºâÿñNð^&=Ý–"3v+pÍÄÎ5 œ'ÀÐ?ûgC¨Èœ6a¸j!¬xŸå’åk.œ¡­#´6ÜþÍm†˜4ß¹Z¶`þ<êÝ›õjWK Í6:"Ó~ó ׌P5¨’"S#ØÙÝ÷Ù9ÖxÔeŠÌ¶mÛÊâÅ‹eëÖ­ªÙºuëªWšàu"N§É“'+q‰½˜H¹¦7“"3vú\3±3d $@ç ðûgCr"3´ Ïã~ãPžõ7«6l:w½Ž$APBŒBT2„émó&»ç“"3šÑçᚉž™JPdzÁJQô‘7ÌQÀŠ «)2!(›7o.C‡U?xO%D&Â^)bG‚˜\¹r¥àU%ø1êGJ—.-©R¥R¦ÈŒ8×Lì Y Ày¼a¶6„™‘´ˆ:BSñ¢E")vAŠÌ¨‘ET€k&"LžËD‘é9“¥Üa†þÙkÐP‘i†ªâ"œ€¦ØD«Ø« ±‰PZ3œ6cÆŒ— ­E=¦g¿ã=œ¨3éû8q²-ÚFÝf¢È´ÏÎ\3ö±dM$@ÂÐ?&"ÓŽnQdÚAñâ:.ë W·k¥ÈtÛl_kɉÌÐCâ0ž+V$ ÆK "žÏ¤"2i~Sx+Mir Qdj=mØ9   PdÚ“U‘@œPdÆ 4›ñ&p"3tTs…'ÿ7…d¨·291iz–ZÔÄÐ?{Í@‘i/OkãšÑÑ*ì x—CÿD–.]*åË—·ÍˆVEæñ'eÙò52{þ"Ùf¼Gó„ño¤ôÆ;1ñ>L¼³\é¥PÁ|õ•§ËF„)êL\3Q#óDŠLO˜‰t‹E¦[äÙ. 
€W à0»S§NIÏž=¥eË–1ÊÈܶc—4z½£8x8lûSÆ •HÞ›I‘%3@"ŠLNHE&§‡np€T«V­dáÂ…röìYÁ{T{ôè!E‹U]}ã7¤fÍšòÿ÷qíz¿~ýä—_~‘ØÒîÍ7ß,‡ì?Æ8¯¹æ)Y²¤Œ?^²fÍj©aƩ÷† "*TÆËO<u]ß|ó¼ýöÛ‚1¯_¿^ÕóùçŸG]Ï¥ üõ×_R¯^=™>}ºmuZ©è™gž‘5jHÕªU­k™-Z(;äÉ“'®í²±ä àÕ^M›6•ÿýW­_|GÅ"6£™û’§ª×Oô\š½¼ïž»;¼aÓÖÄß)29“IÀ~™ö3uµF†þًߪÈÄ þ×_lg®¸â É›7¯Ü~ûíÒ®];¹ûî4þ‰Ù% xmÍ<ÿüój,ð@ˆ}ôÑGòÒK/ɧŸ~ªÞʵŽ;J©R¥.óéÓ§sÏ©´oß>AùòEv®Û¬Y³¤X±b*ëï¿ÿ.?þ¸*TH&L˜®x²Ÿ‡ŠL¼f(GŽrà 7D]×ÚµkãåË—ËñãÇeÇŽrß}÷E]Ï¥ @¼(P@½+×͉ÈxŸk–,Yäõ×_S´>\åûõ×_•›|Xy2qsÙ¾}{G=ðdþïÿ“.]ºHBB‚NíÚµ¥W¯^rÙe— ÛwÞ©>Cß! GŒ¡> M'OžTPÁóоáßø.G´~Ÿ={¶*Þ¨Q#%²^|ñEÕNóæÍÕß!†á½õÖ[åË/¿”Í›7 /^\ÙBû·ß~“W^yE‰fŒœÐôÌÀñÇ0CŸÑL™2)±ÿꫯ*~°a‡T]=ôŒ;V x´q©vñàýþøãU}˜£˜ñxgÎ x‹á½Ç¿áÝ®S§Žò _wÝuÊ&UªTQcÅüîÓ§z€ÇïH=¾ï0¿1WÍtÕUW©ïœ·Þz+ªF+2k¾ÔTLOe¿ní¤j¥ Qµw©Ì™¶`¼¨üã W·k¥ÈtÛ6·O‘i/ÐXEæ˜1cÔ bhúóÏ?Õ^Û¶mÕͼK›6m’;î¸#âÎÓ“1ª°½¶fˆ›~„žá¦¢ 4…z2Ÿ~úiùᇔGð–[nQânÛ¶mJèAŒA Ì;W ±‚ *! ‰ù ‘€~”ƒ¸€þá‡VåF­ê†PL*2K—.­Ú‡ù!ž<(<ð€¬\¹R Qü=mÚ´‰Ìï¿ÿ^^xáyöÙg¥I“&— D~ l!¸!œªU«&»wïVý1ÃeM‘ ^ùóçW‚!´­[·V"lÁ‚Š|÷Ýw²fÍ%ô!ÐŽ;¦D5쇇°Û’%KTyˆDüm7kÖL6lØ "@˜ÝsÏ=*Œñ¹çžS!È}ûö•÷Þ{O…9BDãïˆè3æ8͘1CÊ•+§D!DÝÈ‘#U^oŒý€}`oŒóçRíblHèßöíÛ °ÅƒŽK‰LÌxOÁ6‚!€‘ 0ñY§N®{'3ð†ùÝPO&Ö9Ä&ææW´QÑŠLÜÜš)ÒPØHæEf$”¢ÏÃ5=3/” Èô‚•¢è£×Bÿ¢š+Y™æ@f‡›=Üìcn#M™‘’ ŸÏkk7û¸yŸ6mšgÙ³gW7äðî`^$™ðH™^­œ9s*¡‡„ éÓ§Ws{=á…' óþçŸVB"í˜ûQ¶M›6ʱa˜Td¢<òˆjõBDÀ‹‘a‚!ö6^Ê“ ñoÆŒŒÍO— DG®\¹ÔxÒ¥K§Ú‚¨‚K*2‘ÞHˆE$Œ¢ù±6³e˦<ðB€Â“v)‘ áZ½zuåÁD‚`ƒ·â "Þ\çËÈ›t¿#<¨ÝºuSu Ü}„Єàƒx‡PD? ÒÀïÁL W=qℚøžex^1<€M ú‘Þ|óMeˆb„2¢ 2¨Ï0·`_Ì)Kxc‘À¤V­ZªÏ™ð… V¶G]Ø/‹»Á>ð ¢äÚ…Gíáá Á+Ї&xøq)‘‰¹¹nÝ:ç} M°öB|º™ú'Ê“x bŠKx£­&Sd6iPWš5:÷p"¥ôP…gå࡟T–òeJÈè!çæp¬‰"3V‚É—çšq†«ÛµRdºm¶¯5'E&^·n]–ˆ›`x0"M™‘’òw¾þùG…¾BDB¼@$™?„ÀPƒ'ËÐýs¿ÄM=D¼š55E&¼c)H8|/xÖ z’™_}õ•ª ^2ÔO 'ª áð"xŸ" —E(+Ä Â+ñs©±À«X¤H%tÌ>ðª`\IE&Ä(ÄÉœ9s.š,Å+B‡!ÐR™ðæApA$!!¤³~ýú°úƒtà%„‡5©È„ðG(«éåƒwÞ;ˆ~x%áÁD? 
°`ó‰'ªýª¶†‡âe°O\Ø Ðx ‘Î ›Àî¨â:iBŸ!†ñ¤Ð=š™° ¸B|ãa¼¾fºúê«eÿþýêáDrí‚+D&<æ÷<Ï(F¡"¢m`ž Ÿø 6à ¦.˜H_|ñ…úÜ™Ü%Ûàá Ö|,âÒE´"3tO&ê€Ðìк±äÈfíÐ0³™îÎ+¶î-™Þ²{gN‹LÜHâ‰/n áíˆ4QdFJÊ_ù êf‰pÂн¾D(*¼fIE&ć)d úp#ï<‘%¸y‡@—ðúë¯Wû9!c™0ªH¦ÈÄ>,ô^,$ôó?‘‰üðòAdÀ;v©±`mÀ“ ¯%Bô6üä“OªΤ"{FQ/„ ØB\Âc‡“Jáá¼í¶ÛTH&ök†ódB ¡,„ĸB ATÂ{‰t)‘‰r7„Ó#AÜB”›žjô ¶÷â‹úvà僇§àâÂ[!2C˜"¢œÀ u"áoh‚-ôtÙ¤"óùÑx¦M†˜cðdBà&×.æ„)ÊÁóŠ„0[Ìxßá-‡Ç ssóã‚}!bv‡$¨å°_aϦ·[fr…l`F2ØÑhE&N—}òÙ—Œ5ÿÇÍCl–/SRªT|ÌR·(2-ac¡€ Èô™á½ú§;~'E&nx.‹›W„ëáF-ÒD‘)©ðù¼¶fî¿ÿ~µ{› ⦈0Fú¯D&„Ê£>ªÄU¨ÈD8"öãÆb^IIxš°Ç7éø‚ Gˆ¤¤â$ROfr"ÞE„ׇCaÐWSä&µTrÿ@@`?3Dð¥Æ‚=£¿xËV­Z¥è'ìM*2Ë”)£ö™"Ô}ƒ— ýÀKˆ±â÷Œ3ª=°h"âõc¯bèžLˆSˆ'Loñc=¦¢ Ö"™S¨× ='x˜qØì¡Š°Y<”Â<—齄8DŸ1Oཆ(ƉK‰Lô|pXæ<ºè/ÊÁ ‰È„ÇÞWìÅÜD˜$8aÿfJí¢¯ʘ;wîT^q„*£ß8UƒàÆ\FØ%D&æ&ÚÀƒ|wÂ{Œù‹“•1ô"ÛÍÄÐ?ûéG+2ќ۰E‡Ä°ÙÐ^¥OŸNêÕzFêÖª&Œß#M™‘’Š.×Lt¼¼’›"Ó+–аŸ^;Ä$Âa¹–Í ‘‰×<à<©Ç“xÜ"üÍ|Ïa$ƒ¥ÈŒ„Rdy¼¶f°ç%pCoî}ƒàBx$Nw…›9s¦zp*2±ù Pps9A/Ä nÖ±o"¢ CˆQ+á²É‰Lx½Þ €“AÑ&Äm$"{ípÂ4!D’ öÞ{ï½Ê³Y0ÇëR§Ë"¼ áÙE¨-Ä"¼e8°û !ZÁ ‚ ¢§¨Â« ¯2¼‡¡§ËblG‚HƒXÅTdâ &;sï$„-ì¡‹°zØÞhض…(ÉðVC\™nx_!Ô B‘R{Ø»‰Ã‰Ð&\`Ï,DmÒW˜\Ê“‰ú!„1v„îb(<¥ˆ)µ ¯4Øa ¼®l O.쇹‰¿anBdb^!!»ðò¢$Œ\pà’›‰‡˜ØOߊÈD/ŽŸ8) SfJÂÔ™y5ñ9ÄfÇV#>}–"Ó~Û¢F®g¸º]+E¦Û°¹}¯Ý0Û<|Û«‹Ud"/éûÀà)ÂSy3an"£I™ÑÐJ9/׌},ÃÕ¡€ƒjðÚÌaˆ8xL!ÚìJ™ðª™'ŽÚUo<ëˆÂ>RœÔj5Á ‹}Œ¦AHßðÈâµ3n¿¾„7ÌöÏ8«"3´'K?Y-³ç/’e+Î…a‡¦Ñƒ{&ûͤù(2í·-jäšq†«ÛµRdºm›Û÷ZèŸÍ÷½ºXEæ¥:„}T¦û¢M™Ñ»t~®ûX†« §LBPBàÐ"xÚðÎOxâìJ~™Ø{‹]£}— â ˆyx"òj'[»läD=ðÞ#䡵n'†þÙo;D¦Ù+ì×>:Aæ,8<š+Î:K‘i¿mQ#׌3\Ý®•"Óm °}­ Ä*2“{O¦¦È´ƒ"ëð#6ƒ=‘^÷à!$ïÄŒ6á´a„þb.N¶ J²Ê+(|¼>N;E¦ÉÂŒæ¿#yŸ&E¦×gûO™ñ¤Í¶‘€w ðûmç„È —M—.­lY³0lÇ.‘¥ \3–°i_ˆ"S{E×AÞ0GÇ+\nŠÌp„¼ÿ9׌÷mÈ€NxÃl¿5¢™5_j*'Nþ!åË””Ù²HvãÇLV§Ì®ß¸%ñoujV“Nmš„í8EfXD–2pÍX¦}!ŠLíM]ú¯p¹)2Ãòþç\3Þ·!G@:`èŸýÖ°"27lÚQG æÏ#¼71¢¼™aŠ:×LÔÈ”z¼ºœ0þÿÅê%Cútqïƒ_äšñ§e)2}fWÞ0ÛoÐܹs˾}û¤K—.‚ðY7<—·Ür‹ò`&$$HݺuÝìŽ/Úæšñ…9Іo˜1…ªŠ“bûukg)ÜÕjÏàIí5p¤ ½uÛ›ju :—ãšÑÙ:ÖûF‘i–%úg¿YLo&jnÞ¼¹ 2ÄþF"¨qË–-Ê« I/fÀ"ÌÂ5!(f#ˆˆCÿ"Âd)“®ŠÂØWY×8¸§|™–ꊤÄ國eÎüÅ*{•§•þ=ÚGR”y¢ À5,e¥Èô±ØU÷ DóرcÏ&<šuêÔ‰K‡öîÝ+ݺuô óý÷ß—k¯½6.í³  Ð…„_›Î}ä࡟T—Òa«šÅï-"xI¡‚ù,wuÿÁC†·rñÍͲtùÁ;5‘²eÍ,Z7‘òeKZ®›I h(2ƒfqŽ×2x+W®¬Bg‘°O³téÒJlâw;¼•óæÍSbžTó€ ²s¼¬‹H€H€¢!°ô“Õ2{þ"Y¶bíEÅn+Wí™,fO3áßø;Òú[ÿ¾}ç.Á~Ëп™–+ý 
!`KJÕJ¢éó’ (2}6 úç¼AáQ„'Ó›h^EPüÀÓhÅË»råJå±Äï¡ Bm‹Êd/®{y²6:†þÅw@ âU%‰ð@âu%¦—3šžà½™… z[|†W´°3^qÂÃ}¢!h=/׌uv:—¤ÈÔÙ:úÆCL,@³XFxñ*8Íê 4MgèïðJš"2ô÷¤Ý¨T©’­ð–R\Z4RŸf"€äá,ºÛáÝ[Vz˜0»ž”1ÑcN ¬ §Án3ìIšB=›±¥ tÏn\3î±w²eŠL'éºP·î7T. ‰K“Ø7i Nx#­¤\¹r)AizD­ÔÁ2Ñàš‰ž™—Jèn_ŠL/ͦÈúÊæÈ81 ˜¸fü9(2}fW†þécÐKy,/åáÔ§çÁê ׌¿íM‘éoûê8:†þéhöIg\3:[Çzß(2­³cI  Í Pdjn vH€HÀ—(2}iVŠH€H(29H€H€H þ(2ãÏÜÑúç(^VîC\3>4jÈ(2ým_GÇÐ?­Â>éL€kFgëXïE¦uvZ–Ôý†JKhìT  pÍøÛüºÛ—ÿøoþñÿÙ”#r–׌³|ݪ"Ó-òµ«û •CÃfµ$`™×Œetž(¨»})2=1¢ê$o˜£ÂÅÌ$ \3þœ™>³+Cÿ|fPÇq\3Ž#vµŠLWñ²q†þÒìt ¸fb€§qQŠLî‘ ÄF€"36~,M$@$@VPdZ¡Æ2$@$@ž @‘é 3±“$@$@>#@‘é3ƒ2ôÏgåp'À5ã8bW Èt gè_ ÍÎAÇ@€k&x¥ÈÔØ8Vº¦û ••1± 8I€kÆIºî×­»}yðûsÄî𻉲>¿àšñ§…)2}fWÝo¨tÁ}ã7Êï¿ÿ.©R¥R]Ê‘#‡¼ñÆòÊ+¯¨ÿõ×_Ò¯_?™:uª8p@}þüóÏ«³+Cÿ"3(DæâÅ‹åž{î‘S§NÉìÙ³•ˆüꫯ¤P¡BòÜsÏÉñãÇ•ð»å–[ä³Ï>“îÝ»«ÏÆŒsQ#[¶lQBô†nˆ¬!¹Ö®]+;v”åË—G]–b'À5;Ck ÈÔÙ:þìCÿR¶kÑÒåØñ‰y³d¾Q½XKjT«¨ þý÷)›0Mæ/\*‡ü,Y2ß$O?ùˆ4|±¶ñ÷²‹*ñÕÖò|*Ræ¡û£žP»ö|/s|$m›7Œº, ØG€kÆ>–:ÕD‘©“5Ø—¸™f£·Þz«ôîÝ[®¼òJ屄à¼üòËû&þ>a„ þŽ O=õ”4lØPrçÎ- 4\¹rɲeËĬ³L™2rèÐ!©S§Ž¬_¿^®»î:4h”*UJŠ-*GŽ‘J•*)ÏéðáÃeÈ!ò믿*Q;yòd)P €Œ5J¾üòKÙ¼y³ìÛ·OŠ/®þq»uëVÕî·ß~+÷ÝwŸŒ7Nn¾ùfãbý·´hÑBÞÿ}É’%‹¼þúëJL3‘@PPdÅÒ§W@d&¼9@î(T@NŸ>-‹—­’–zÊÂY“$_žÜÒ¬m79ùÇŸ†ðk 9²g•Í[¿‘c'K^㳞[^4Ìm;v)!z]¦ŒQ#Ø´å+«T©¢<±:u’üùóËàÁƒa»­[·V"m´oß^¶mÛ&ï¾û® û}衇dîܹR¬X±ˆÇÆŒ$àe™^¶ûîG¡"Ó_éÇkH«¦/ËW\.†½% gO’ËC¶¦À£9Ðø{Ÿnm/ø;ʿܴԬVɤY¤C’=kY»n£äÌ‘MÕY¼èÝräç_¥u§Þ²õ«m’1cißò5¹ïž»äéš ä×ß~—reJÈ>dò´Y2qÊL9zô˜ä½5· èÙ^nÍSÞ™>WvîÚ#ß‚öÀÁÃRäÎÛ¥kû’ÕðÂnß¹[:í~¿ïG¹ëŽÛ¤w—6’5ËMF”Ôié9`„,]¾Fn¼á:yñùg ì£~4)ÇDÉ ÈôÙÄ`è_d…ÈD8lêÔ©•ð»þúë¥C‡Ò¤I©\¹²ÀóA‡4~üxõw3A4–,Yò‚†BEæÝwß­¼éÒ¥Sâ^Ñ®]»Êºuë”§¢ÐL¡"Ou=*7Ýt“üöÛoªÌÏ?ÿ¬D"D&<Ÿ~ú©*úæ›oÊöíÛ¥ZµjÊC¹iÓ&õw”[±b…¡9sæ”éÓ§Ë< >ƒ·3}úô2pàÀÈ@ ׌¿L‘éoûê8:†þ¥l•P‘ùqý]úÉjiÚ¦«ÌŸ>Þ]],×\}µ¼Þ¸~Ħ ™ªÖ•Ž­›HšUeÔ¸·eõ§ŸËŒI#¥kŸ¡ræß¥s›&òñÊO¥×€‘²fÉ, õdB<–¯ü¼¼?m¬äΙCzô!g^À{ ‘Ù£ÿp™3u¬ä3Ä端w4<±ùåµWêH¹Šµ¤C«ÆRêÁbÒwÈh9pè°ŒÞWKv·W†öí"?"¹Æ‹Mdô^Røÿ E<¶ däšñ§¥)2}fWÝo¨tÁ\¸¬Ù76x7GŒqQwáìß¿¿ s M¡"³jÕª²cÇõñÎ;•„È„øk×®Úÿyíµ×J«V­¤Q£Fx2!x‘ç½÷ÞSá­ª™3gN™¨ÇÜ:vìXùúë¯UØ,êœ3gÎ}‚‡Á3 1m&ˆè=zèb ×ûÁ5㺠í€îöåÁ?Žšß•ÊyˆIx‘yòä’ʸ.ýk¿k Ïâk/?//”uò˜ARôî;/h 
TdV¬^_6®ú@Ò^sµìÚ³W‰×E†WtØèÙòå7Ò¹]3¹%×͉åCEæiãºâÄI¹þºLrôØqn”ùõ÷£2¬_%2±GtæÛoª²SfÌ•=ßï“ÇÊ•–ÞGʃ·3í5×È-_ue^êÜ(׌ÎÖ±Þ7ŠLëì´,©û •.ÐR™‹-’^xAöìÙ#2dHìòáÇÕK|ž’È„¨„‡)Td«‰½šWOiW®\©<¦£ß}÷]âÁ?ðXÂËo)¼«ï¼óŽ,\¸0QdBTŽ=ZÕmŠLìåÄÁAð’"Á‹š D,„*B€³e˦>C[W\q…ÚÇÉtŽ×Œ¿g‚îö¥Èôßüã sÊ6M.\Ö,ÑËlxØÚ¥]ó‹*©R»±O³‘»·ðŸ…ŠÌW_ï$KçM9w½Ûûƒ¼Ö²³™†5ö®” éÓIý:5¤Ö³•/ðdž9ó¯Ê³pÉ'Æ!~×+¡ ÁiŠÌ»¿KÜúî¬ùò­ñïÂÿw»,þx¥Œ|áö<äÍwÉsKN〣óy)[BZ¼¹—Ö«#ùqÍøÓÒ™>³+Cÿ"3hJ"5<ùä“ròäIµûñšì±ÄÁ:3f̰$2Q'ökb%öbâÀx&±W‚'ØŽ9Rí™ÄÉ·þù§êBgá©D¸lr"'àæÍ›W…õ>üðÃÊ <þ|yõÕWU=¦Ta¾²IErdÔü™‹kÆŸv5GE‘éoûê8:†þY™+׬“V{ËòÞ5"yÒ&Vôó/¿Ié'ªËÄQR™•KÞç"‘ ¯föl™å*ã`¿ ›¶ÓF¾)òドÿ,X´LÆOž.“ oi¦k3ÊÜ>’«×%ŠLˆÊÿN¦ pÍøsFPdúÓ®UáD&Âe»uë¦Ä<˜EŠQ'ÏâdXžƒ‡¦ÐpÙKy2!ëÕ«§¼›™2eR'ÕâDZ¼¯'ÂÞyç2qâÄÄðZüƒpÚ–-[ªý—‰É‰L„õ¢_”ðT¢oðdÂkŠ}§8pè£>R·6mÚ(±ÌDA!@‘Ksœ^!’'c¨ß¤­ñpô/éÔ¦©:Qv×îï¥{¿áê`áý»Z™¨3Þ[¥ñ+/È¡ÃGÔ? g'{%‘>ƒÞ”Y·§Ï‘%¯–„Ñäï¿þ6úÑNy2ßÜC…Ë&'2Û¯>)ûäsÒ§K[y ØÝÒßð„þðãAykxéÜk°:á½{‡–òÓÏ¿HzÕþ̤žX¯Øý$h PdFKŒùI€H€³2Cÿ|fPÇq\3Ž#vµŠLWñ²q†þÒìt ¸fb€§qQŠLc¥kºßPYË€“¸fœ¤ë~ݺۗÿ¸?Gìî1±›(ëó;®Z˜"ÓgvÕý†Êg¸9àšñS‚îö¥Èôßüã ³ÿlÊ9K€kÆY¾nÕN‘éy‡ÚeèŸC`Y­o pÍøÖ´j`™þ¶¯Ž£c蟎VaŸt&À5£³u¬÷"Ó:;–$‹à”Wœæúù矓 €(250»@$@$8™39ìS§N ~vìØ¡^IbgÂË©S§N­~˜H€"'@‘9+æ$¯À;-ÿ÷¿³Ré‰ò¶uù©ê/ÉàÞeÛŽÝÆ‰°ëeP¯Ž¶ÕÍŠH ˆ(2}fu†þÙoÐHùòåå®»î’E‹I–,YdРAòÄOÈÖ­[¥yóæòàƒÊâÅ‹eìØ±êÝ—ðdŽ5JV¬X¡ÞmùÓO?I¥J•¤lÙ²ê}›gÏžM¬ïÑ»+çÏŸ¯Þ©U®\9yçwäꫯVïÓœ0a‚z&ÚºÿþûeΜ9R¸pa5Ð%JHÛ¶mïéd²F€kÆ7¯”¢ÈôŠ¥üÓO†þ9oËQãÞ‘ÿýWš6¬k[c™¶¡Œº"®™¨‘y¢E¦'Ìy'u¿¡Š|$úä„ÈÌ‘#‡´hÑBúõë'Ë—/—jÕªÉîÝ»åСCJ`véÒE‰Í/¿üò‘Ù®];Ù¶m›¤I“FòæÍ+?ü°Ì;WF-³fÍR"tòäÉ2tèPYºt©\vÙeJd¶nÝZjÔ¨¡DfÅŠeäÈ‘’>}z%F!rÛ·o/GŽ‘|ùò){ÕUWéÌc=ášñ˜Á¢ì®îöåÁ?QÔÙyˆ‰}F:òó¯ÒºSoã™Û$cÆ Ò¾åkÆõîJiݱ·jﻬWû™CåŒá±ìܦ‰|¼òSé5`¤¬Y2KB=™–ò•Ÿ—÷§•Ü9sHþ#ä¬Ñ…ž[*‘¹}çn™ùöh#‚èòxµz2¤O'¹£P)ýxuC<¶•‡K=(#Þš$£Þz[Íž¤DfË=eÔ R¶Ô2fÂY¸d¹,œ5IžxæEUïÝ…ï0„èJ™6sž¼=v°}hM\3þ4J*2* ¯%RÉ’%¥oß¾Êó 1Yºti%2Qï+¯¼¢ÊÂÛ ÁŠÃƒL‘ùí·ßª¶°ç3sæÌJ¤Ö©SGj×®-Õ«W·Ðª‘kÆßƦÈô·}uCÿì³Ê°Ñ ²åËo¤³á}„GÑL¡"óô?ÿkOÊõ×e’£ÇŽËp£Ì¯¿•aýº(‘ áÙªé+ªh“Ö]ä‰G˪ßß~w¶L›0\ý~úôi¹ç¡§dÎÔ1JdN~w–Ì™2V}væÌ¿r_™Š2wêX™ùþBõ@ùõÆõ•½û®;¤Ö³•íp@kâšñ§á)2ýiWŽÊF¦'óرc’6mZUóÓO?­æ½÷Þ«„ÞW_}¥þnEdBL" >\®¼òJy饗ÔÁA¦ÈDXîõ×_Ÿ8"xPüqiÕª•üøã*Œ–‰H y™œ$à]†U^à éÓIý:5”¨ ™ȳpÉ'rà ×a­W+ÁiŠÌÇÊ•’'{XAhÖ¶› 
ýù—ßä«m;¤÷7á<\±–ŒÒS‰Ìe+ÖȈÝ?{òÙ¥[û’Êø¯kß¡J€+[Éð|N–›n<}ö.iöœì'@‘i?SÖè3æžLˆ:x"W­Z¥ñÙ¾}»ò:Æ*2±¿óöÛo—nݺÉÎ;÷x6iÒD…Ë&™'N”:HÑ¢EÕaAL$@—&@‘ÉÙAÞ%°kÏ^Éž-³\e<€ÅžÉ†Í;È’÷§(¢yðNš?yºL3H2]›Qæ~ð‘¬X½.QdBTšÞKSd¦J•Jy2§Ž¦à7<¡÷–zJ>œ9Q‰L|6{ÊõÚ)nì½|Ú[’5ËM†¸¬,o{C§Ïž/ïMå]¸ì9 8L€"ÓaÀñ®ž¡ö‡È„Çò¹çž“wß}WíË0`€Ú[‰_c™›7oVû0ÿøã%N;pà@ùì³ÏÔ‰¶IEæáÇ%[¶l±Y·n]û°¹fümpŠLÛWÇÑ1ôÏ>«ÔoÒVòç½U¿ò‚:|Dž®Ù@ÎNy.“?ÿüSZ7k oOŸ#K>^- £Èßý-õ›´SžÌ7÷Pá²É‰Ì‹Ý#¥Œ=™ý{´—²Ý/£'L•!£Æ_°'sì°ÞR¦äý26aš,[¾&Qt¶êÐKÖ¬Û(/×­!/=Ïí*vX›kÆŠúÕA‘©ŸMbê‘î7T1 Υ™ŋW¡©:$ˆQœ0»wïÞ Âhuè›ûÀ5ãE«EÞgÝí˃"·¥Wròû,õíîï¤Mç¾òýÞ$C†ôê4ÙšÏTR§¾¶x£»¼ôB ©þôòš!&wîúNí¿D8mïA£¤{‡2á²dE&Bh?]¿Iº÷&¿=nœ:[Bp€PûV¯)Oæ‚ÅËŒ¿ <©ùóÞ"}»µ“œ9²©}øÑ'*ìvù‡ÓåæìYíl€kâšñ§ñ)2}fWÝo¨¼ˆ['‘ùqÀúàƒÔɲL±àš‰¡Î5èn_ŠLgµ¾ñ†Ù7¯”‚8í;d´ÌŸ>Þ+]Ö¾Ÿ\3Ú›ÈR)2-aÓ·Cÿì·Í_ý¥Þa‰×–¸°/ówÞQ¡µL±àš‰¡Î5Pdêlö¡þ´+Fõ÷ß§Ô{;o+W^­ÿ¼ç‘qÍÄxœš£ÈŒh6C$@$™ñgÎIÀ¯ªÔn ×\}µz‡fF#|—‰HàÒ(29;H€H€|K€"Ó·¦åÀH€H€4&@‘©±q¬t¡V¨±L pÍøÛú™þ¶¯Ž£c蟎VaŸt&À5£³u¬÷"Ó:;-Kê~C¥%4v*иfüm~Ýí˃ü7ÿxˆ‰ÿlÊ9K€kÆY¾nÕN‘éy‡ÚÕý†Ê¡a³Z°L€kÆ2:OÔݾ™ž˜FQu’7ÌQábf®NŠLŸÙ•¡>3(‡ã8®Ç»ÚE¦«øÙ8Cÿiv:\31ÀÓ¸(E¦ÆÆa×H€H€b#@‘?–&  +(2­Pc  O Èô„™ØI  Ÿ Èô™Aúç3ƒr8Žàšq±« PdºŠ?3ô/fç c À5<‹Rdjl+]Óý†ÊʘX†œ$À5ã$]÷ëÖݾ<øÇý9bwxˆ‰ÝDYŸß pÍøÓ™>³«î7T>ÃÍáø€×ŒŒ˜Ât·/E¦ÿæo˜ýgSŽÈY\3Îòu«vŠL·È;Ô.CÿËj}K€kÆ·¦U£Èô·}uCÿt´ û¤3®­c½o™ÖÙ±$ €æ(257»G$@$àK™¾4«¿uüÄIÙ¾s·$þ'MÒ§“Û ä•ôéÒJ¡‚ùü $ÊÑ‘_”À˜ÝÓ(2=m>v^cÛvì’'ÿP×`󚜴»¸›×cüŸé<òãlð;ŠLŸYØo¡û’ ·Êº›åÀÁò~ãKÃ….G¶,RìÞ"rß=wFx’_øéâ·5~ÄÁÊA‘,{ë0Z?†þáÚ{îg³ì7®Å¸G›Ò"³q-Æõ×âb÷V4‰üR¶²×Læu¸1Rd†#ä±Ïu¿¡Š'„Ѥ©³déò5]ÈÒÁ3i\ p‘ Mæß·ýçåÄg¸ â)ëŽo÷\7»!8Ë—)!ukU3ÄgÖHºå™<ä©ü°f¢q°rën_üã¿ùè—CLæÌ_l\‡W«kqÒT0%ñð6{È5ÔôZ†F0®é¦¸6Ÿ4<Ÿ¡ ×áòeJJ•Šùn"_ä&õËš‰|ÄÁÈI‘é3;ë~C•nˆ£^F^pAƒ×Ñ|≰×Xžz®û|³ éÁEsæ­‰]ÁÓÔ­{Þ»I~Ö³—׌µ«”îö¥Èôß|ôú 3ÄQ#äÄ[Q²eÍ,Åë$®ÅðBƲשí;Î]‡×žÑƒ‡~R^Îzµž‘¦ ëz~B_ô&ôúš‰~ÄÁ(A‘é3;{5ôoö¼EÒ¶K_e \Ðp±©Z©BL¢2%Ó»‰6¦ÎL¼È5mXϳ8ò³¾½ºf¬8X%)2ƒeoFëÕÐ?\kÕo–¸¿²ÊSÆ R¼è…QCv2ÆÃß9óÉœ©j!bûuk“µ³ÑÔE~Ñк0¯W׌õ£$Ef0ì¬õ(q‘©ýrsA(l3CèÕ«ýL\û †§¶ã=¸§”/[2®íÇÚùÅJåýL€"ÓÏÖåØì$‰½ƒˆ êßã¸n%‡³gÿ²lÅZ%4̘`çÐâRùÅ3ñŠL˯]6:AFŒ¤ž^Â{éFÂ)okÔW×i†»ÑËm’Ÿet,™02‡h Ü"’hÕ¢÷l©ÏJ% ›·WBsʸ¡ŽzP­ô-\ò GˆŸE¦Ï,îÅÐ?ó¢‚}‘ñöbšæO˜2Sz éI‘I~±-b/®™ØF¬Ò™Á²·£õbèŸù 
‡âÁ‹ËùVm€pÓ§ª¿¤üóšÈ$?«V?W΋k&¶£4E¦Ïì¬û Ur¸k¾Ô4ñ žŽ­›¨_ã•pQëi„Êb£>’=™äÛlñ⚉mÄÁ*­»}yðÿæ£11·]ÀW=¤g\Ãe—~²ZzzÍW£xMd’_lëØ‹k&¶£4E¦Ïì¬û U8‘‰Ïñ$ÿ”+ó c9ì;™Ãþ—Â)w^™äý‚ö⚉~”Á-¡»})2ý77½xÃ*’L‹àµ"x½^3âD2ßã÷8œ˜5Gö,–örúQd’_x^\3áGÅ&ŠLÎ…xðbè_8‘ CÔš.õ@7¥ºü*2É/e^\3‘Ø4èy(2}6t¿¡Jw¬"Ó.UdŸ׌]6 B=ºÛ—ÿøozñ;D¦– ²È 2?/®;ìå÷:(2}faÝo¨(2íŸpé±1õ⚉mÄÁ*­»})2ý7½xÃL‘Û<$¿ØøyqÍÄ6â`”¦Èô™½úG‘Û$$¿ØøyqÍÄ6â`•¦È –½u­Cÿ(’b›9ä?/®™ØFŒÒ™Á°³Ö£¤HŠÍ<ä?–ö7ŠLÛ—£³‡ERlÉ/6~,íO™þ´«§FE‘›¹È/6~,ío™þ¶/GgФØ8’_lüXÚŸ(2}fW/†þQ$Å6 É/6~^\3±8X¥)2ƒeoFëÅÐ?ФØfùÅÆÏ‹k&¶£4E¦Ïì¬û Ur¸£Ifޔ̖Ýx'fù2%¤iÃzê½\‘¦ œ.K~Ï/®™Hç4ó‰èn_üã¿YêÅCL¢IfÞp–Ãu¸ÊSIù²%ÃeMü<§Ë’ßÅÓÁ‹k&âIàŒ™>3¾î7Tñ™fé 9uÜP)T0_DV¦È¼SPøyqÍD4¡™IÐݾ™þ›¨^¼avBdš–­Rñ1éßýˆ M‘y1¦ ðó⚉hB<E¦Ï&€Cÿ¬z2woYy‘õö<$³ç-–c'©Ï ”6¯þ0"+Md’ß¹iáÅ5Ñ„f&ŠLÎWx1ôϪÈlÒ ®4kTï"ÎK?Y-ÃÆ$ÈŽo÷¨Ï:´j,õj?ÖA™äwnJxqÍ„ÌÌ ™œ®°SdšƒiÓ©·ÌYð‘úg¿ní¤j¥ aÇI‘yQø…ÌàiôdzÚ|ì|œØ-2Ñm<ô-ýx 5lcY¹pFØÑPdžG$~a'3x’E¦'Íæ¯N;!2C÷ûÚrt8™ŽâeåÉðbèŸU‘‰º¡žÌõ7ˆM[•HÏCE4‘I~ç¬ïÅ5Ã/¿ð(2Ã3òTÝo¨’ƒiUd&=M5ô°šH÷„ö'h"“üÎYß‹kÆS_J.wVwûòà—'ˆÍ{ñ«"3éµ{ ªð¬œ<ù‡¤7ÂcW.œÕß ‰Lò;ÿ°÷ìÙ³Rõñ’¬HVéŠL·È;Ô®î7TNŠLÔ‹ÛÁC?©f"9."óB‹‘Ÿ׌C_¾¬VwûRdúoÚYdš³ç-’¶]ú*Ö/SBF鱑ƒ.2ƒÊÏ‹k&âIàŒ™>3¾Cÿìòd”¡'£bCýÔñÃ"¶pÐ=™AåçÅ5ñ¤fFí=Õ™þ›¤^ ý³Ë“iZ3ôÖSÆ ø|ŠÌsƒÆÏ‹kÆß\öˆ"Ó~¦¬1JvŠL4úòçhö„Pdž3\ÐøE9]™ÝcèÉô˜ÁØ]WØ-2·íØ%kÔWc‰æœŠÌsæ?W&=uœE¦ãˆÙ@8v‹ÌýɓϾõžŠÌs– ¿pó“Ÿ{›E¦·íÇÞLJ€Ý"½îÑ„Lž6K Òs(2ÏÛ;Hüâ3ËÙJ¼ PdÆ›¸Ãíy1ôÏn‘ Ä¡‡Eº'„"óüä ?/®‡¿F|U=E¦¯Ìé‰Áx1ôÏ ‘z É9 ™ç§xøyqÍxâËÈåNRdºl»›×ý†*¹ñ:!2ÑNè!6‘ì ¡È¼Ð:AáçÅ5c÷÷†ŸëÓݾܓé¿ÙçÅCLœ™°lè!@‘œ“@‘yáz ?/®ÿ}sÙ?"ŠLû™ºZ£î7T±ŠL„lß¹KU3mÂðYã¢9|L‚Ê“ÃxwfÿíSÌ‘I~O/®W¿d<Ö¸îö¥ÈôØ„Š »^¼aŽFdb¿`Ï#‰ª+HÕJR¤Ò¦SocÆa•§iÃz)‘I~O/®™¾ Ÿ…"ÓgSÀ‹¡Ñx24WD&ù%q«R¡„“hX·‹(2]„Ц½úÈtÒ¬A™äw1/®'íè—º)2ýbIÙ_¬þ0ª—6Û9dvSúñrß=w…õÚÙ®u‘ŸY‡_ PdúÕ²—L‘Y§f5éÔ¦‰UGU¼žs|$‘lq‰ªb‡3“ŸÃ€Y½' PdzÒlþê´ùäÒÍ‹›}°jUú®C¬òc9 Èô·}9:{˜‡Ì¤J•JÌol1ÉjOÅQÔ‚>”z¼ºœ0þïæCç(ºœ˜•ü¬Pc¿ Èô™…½.z‚ZÝZÕÔž éÓÅÅ2h{ÒÔYjïfºtiåƒ÷&¸rqe°ä =/®™ØF¬Ò™Á²·£õjèŸù°ïµì×­]Š{'íæ O`¯#3v‹›œcùY§çÕ5c}ÄÁ(I‘é3;ë~Cu)ÜØ_³~³Äw[Ö«õŒT©ø¨c‚ÂlÎüÅ’0u¦0$€Àœ6~˜*˜Ï“3‚ü¬›Í«kÆúˆƒURwûòàÿÍG/bb†«Â*8 ¶®q-ÆkÀœJ—s,V×c¤*O=ö>§úbG½äg¢—׌µ£E¦Ïì¬û UJ¸!üð$Ð|y3òÞV ¯q+©.vø=çú[ÔSÒõ7ËÒåk»‚‹ZÇ6Mcª[‡iD~Ö¬àå5cmÄÁ*¥»})2ý7½~à 
áצs9xè'eœôFd„fñ{‹HÁüybz‹ó¶ïÜ“xÆC^¤lY3K§ÖM¤|Ù’žŸä½ ½¾f¢q0JPdúÌÎ~ýÃEhö¼Å2{þ¢Ä‹œi&\ì bá<¡{FLŠ –yTúñ'”¨Ä¿Í ™Y.h¯õjWsÌ[êÖÔ"¿èÈûaÍD7â`å¦È –½u­_Bÿ–~²Z]‡—­X{Vóš[ÌžfÂC`ü ǘ׎á!hèßÌÏÊ•~P]‹Ã½E»FÛò‹œ˜_ÖLä#FNŠÌ`ØÙ³£D¨é}„Xܰi«¥±àé+Þ•‰ "¼¢^ ‹vðä-1æ÷ŠL¿Y”ã‰7D\K  „k±é匦/Ø–‚‡Ä·ÈgxE K±¢E<AÉøÉ/JÌãG™~´ªÏÇOÝþçBl @ñž4áB¯'ž¬EPFjvò‹”óùE¦¬È1èHa¡H8 v›q-NšB=›¸DzÝEÇñÇÚ'ò‹• ËëN€"Sw EÙ?†þE ŒÙO€kÆßS€"ÓßöÕqt ýÓÑ*ì“θft¶Žõ¾QdZg§eIÝo¨´„ÆNš×Œ¿Í¯»}yðÿæ1ñŸM9"g pÍ8Ë×­Ú)2Ý"ïP»ºßP94lVK– pÍXF牂ºÛ—"ÓÓ(ªNò†9*\ÌLê}ÕgÏž•ª{ÿtašó<ŠLŸÍ†þùÌ Žã¸fGìj™®âdã ý ¤Ù9èpÍÄO㢙‡]# ˆEflüXšH€H€¬ È´BeH€H€0b CÐݾ™þ›¼aöŸM9"g pÍ8Ë×­Ú)2Ý"ïP» ýs,«õ-®ßšV Œ"ÓßöÕqt ýÓÑßMw IDAT*ì“θft¶Žõ¾QdZgÇ’$@$@š ÈÔÜ@ì €/ PdúÒ¬ =™œ$@$@$àŠLw¸;Ö*CÿCËŠ}J€kƧ†ýoXôdúÛ¾:ŽŽ¡:Z…}ҙ׌ÎÖ±Þ7ŠLëì´,éö UÑÒåØñ’*U*Å'Kæ¥Ñ‹µ¤FµŠêßÿ}JÆ&L“ù —Êá#?Ÿß$O?ùˆ4|±¶¤IsÙEL_|µµ<_£Š”yèþ¨yïÚó½ÌYð‘´mÞ0ê²n ¿ø“w{ÍÄÄÁjQwûòàÿÍG·1áu$¶9E~±ñ³RÚí5c¥Ï,žEfxFžÊáö ¾œÞ w* §OŸ–ÅËVIË=eá¬I’/OniÖ¶›œüãOCø5Ù³Êæ­ßȈ±“%¯ñYÏŽ-/b½mÇ.%D¯Ë”1j;lÚò• 9^¦ŽuY· _üÉ»½fâ?â`µ¨»})2ý7ݾaæu$¶9E~±ñ³RÚí5c¥Ï,žEfxFžÊávè_è—³ ®ôã5¤UÓ—åŠ+.—ÃÞ’…³'ÉåiÒ$r…Gs ñ÷>ÝÚ^ðwdx¹i;©Y­’!H³H‡%{Ö,²vÝFÉ™#›ª³xÑ»åÈÏ¿JëN½eëWÛ$cÆ Ò¾åkrß=wÉÓ5ȯ¿ý.åÊ”!}:Éäi³dâ”™rôè1É{knг½Üš;§¼3}®ìܵG¾1탇¥È·K×ö-$«á…ݾs·t4Úý~ßr×·Iï.m$k–›äÔ©ÓÒsÀYº|ÜxÃuòâóÏÙGcž+ä3¨+p{ÍDÝaˆŠEfT¸˜Ùn‡þñ:›É/6~VJ»½f¬ô™eÂ È ψ9¢ úåüÏ™3²ô“ÕÒ´MW™?}¼ººX®¹újy½qýˆk ™ªÖ•Ž­›HšUeÔ¸·eõ§ŸËŒI#¥kŸ¡ræß¥s›&òñÊO¥×€‘²fÉ, õdB<–¯ü¼¼?m¬äΙCzô!g^À{ ‘Ù£ÿp™3u¬ä3Ä端w4<±ùåµWêH¹Šµ¤C«ÆRêÁbÒwÈh9pè°ŒÞWKv·W†öí"?"¹Æ‹Mdô^Røÿ E<¶ä2’_lüb‚Ͼ$@‘éK³rP)àu$¶ëùÅÆ‹“L™œ ¶À—óÉ“HªÔ©å_Cø]kx_{ùyyṪҰy{åy¬[«šjsÆœ¤[ßó¡¬“Ç ’¢wßyABEfÅêõeãª$í5WË®={•x]dxE‡N-_~#Û5“[rÝœX>Tdžþç9qâ¤\]&9zì¸ 7ÊüúûQÖ¯‹™Ø#:óí7UÙ)3æÊžï÷ÉcåJKï#ež!‘PnýÆ-òèÃIÉG«e»ÊÝ…ïPŸÁÛ™öškä–¯ÆÄ“übã|ö%ŠL_š•ƒ #2y¶>ExæuØúìaÉP™>›n‡þ%fb"îe¶3†w³K»æQ¯R»±O³‘»·ð%E櫯w’¥ó¦¨Ï¿Ûûƒ¼Ö²³™†5ö®” éÓIý:5¤Ö³•/ðdž9ó¯Ê³pÉ'rà ×+¡ ÁiŠÌ»¿KÜúî¬ùò­ñïÂÿw»,þx¥ŒÜó‚>={Vòß]FòÜ’Ó8à(uâg”-!-^‹ÜK›ÜÔ#¿ØøYYÎn¯+}f™È PdFΊ9í!àvè¯#±]GÈ/6~VV‘ÛkÆJŸY&<ŠÌðŒ<•Ãíª”¾œW®Y'­:ö–å¼+éÒ¥Mäúó/¿Ié'ªËÄQR™•KÞç"‘ ¯föl™åª+¯” ›¶ÓF¾)òドÿ,X´LÆOž.“ oi¦k3ÊÜ>’«×%ŠLˆÊÿûaÿA¹üòËÕ>ÎXùÅÆÏ 
{·×Œ•>³Lät·/þ‰Ü–^Ééö!&¼ŽÄv!¿ØøYY§n¯+}f™ð(2Ã3òT·o¨RúrÈúMÚÊŸþ%Ú4U'ÊîÚý½tï7\¬3¼WK"uæÏ{«4~å9tøˆ:ðgáìc¯ä/ÒgЛ2ëÑòöô9²äãÕ’0z€üý×ßF?Ú)O曃{¨pÙäDfãÕ'eŸ|Núti+»[úžÐ~<(o ï#{ 6^Çò·tïÐR~úù©Q¯±ÚŸ™Ôíä!¿ =ÙÑò³’ßí5c¥Ï,9ÝíK‘¹-½’Óíf^Gb»Ž_lü¬¬S·×Œ•>³Lx™áy*‡Û¡ᾜqЈ1“ThëÏ¿þ&·Ì'-›¼lœ »]Š'Ââß¡)tOæ¥<™ˆm:÷•ïÚ ÒK£—jKÍg*©÷uV©Õ@ äË#ýŒ“k_kÕÙ8Eö;uðÂi{eˆÄ†ý5Y‘‰°Þ-Ɖµ] AùÃCªoýºµ3¼¦YÔ¾S8´êÓ †3¼R÷9ã@¢s{McIä =keÝ^3ÖzÍR‘ ÈŒ”óÙEÀíÐ?^Gb³$ùÅÆÏJi·×Œ•>³Lx™á1 €G PdzÔpì6 €§ PdzÚ|ì< @J(29?H€H€H þ(2ãÏÜÑúç(^VîC\3>4jÈ(2ým_GÇÐ?­Â>éL€kFgëXïE¦uvZ–Ôý†JKhìT  pÍøÛüºÛ—ÿøoþñÿÙ”#r–׌³|ݪ"Ó-òµ«û •CÃfµ$`™×Œetž(¨»})2=1¢ê$o˜£ÂÅÌ$ \3þœ™>³«_Cÿz!¹nÎ!Ï×xÚv‹Mš:Kö<$[7‘B÷•—+çË5W_m{;nVH~—¦ï×5ãæ|Ó©mŠL¬Œ¾ø5ô×‘Øæ/ù]šŸ_×Ll3Æû¥)2½oCmGpæÌ¿’:u*ã'uÌ}ŒäËùô?ÿÈ—_u[¡"sÝç_HÑ»ï’Ë.»,êzì.@~ve}A$@‘D«sÌ&^Gb› ä?–6ŠÌ`Ûß‘Ñ)ù„ôíÚV:ï—üpæDùçŸ3Ò¾[ùzû·R0iÝ´ÜyGA9uê´tï7L–­X+§NŸ–‹Ý#ƒzu”«®ºRúIÚvé+Ûvì’ÿ»½ d4Þyo‘;/òd6jÑAJ•(. SfJÓ†õäî»nO¶­y.•%Ÿ¬’ËÞöÿ÷îÌv’;W ™wÞÿ˜¬ûd®òd~°øc2j‚œüãO)_¶¤t2çNç?íé¿ö:Ÿý<œ³†ßú|×Ú{϶´½½‡Ì˜ÌË ?ü²Ím˜É6fÒ§'€ÉLÏ(V)Lx¡Ò›óô§GËiukË7ßý¯\óm²ì¯³J½š6o+÷ô¹KÎ;§ìß·O*T8™aìuÏ0iyírež‘» ÙòÑ{oäçi×¥·\~IãbM¦–Óñ¿[§¬ë矷ËÔç_–W¦MtbÐ%0ç]r½Ì™9I,\š¿'ÓÉœ4ufÞ æv'N½¾\»^~þù9½^çßU¬"[·ýŸ<þäÙ´e«Œq_`ý~¡Ì¨ ÆLF’ÈÓõåàO²É„CLxŽøë"ðóÇ/ÛÜ&Œ™lc&}z˜ÌôŒb•„*½9ÿuÎt9²Jeù`ùÇÒé®ÿ‘j8¡Ç>Ý:ËYgœ&‡Ž”ÏV­‘êÕNÍ[¶I»[Zå-‹­/·w ‹ßœ™ŸGÓÕ=åäbMæuW5•«›]’²®íyK^,Z"cGÉ/óÚ›:É{zɧŸqÉ| o†²N퓤}› Å­æt䘉òæüw娣ªJùr‡;†3h“ ¿è† c&ºÖ–¼šLדi_Ÿ4ᅙ簿~?ü²Ím˜É6fÒ§'€ÉLÏ(V)LXú§7çw_Ÿ!U*W’µë6ÈyKMÕ4¹×çyû,«Ÿx‚ ÿóxçG÷öï.eÊ”‘÷3O?U®Ê[–zA³òf2ç:{ õjqKg¹¡ùÕÅšÌæ×4sf?SÕ¥KcŸ{~–Ìš6Á)oïÞ½ò_—µ”—ÿò¤,Ì[Jëž.ëÎdN˜<]öä¥é×£‹“þ³U_È·ßÿ7ºG&M}Až0Êiß«oÌ“E[¸É„_tÃ΄1]kK^M˜Ì’§y®[lÂÒ?žÃþzüüñË6· c&Û˜IŸž&3=#RdI ñæ¼/o9ì5­;93”7µ¼FÞ[ú¡ôÎÛ¹dÞËÒoðCR»VMéÙµ“|½þ¹©ý]y‡÷tÈK{ƒ³òŒ<ÃÙ7oÆsùŠOåÖ¼ýšƒûuOi2SÕµ oÿ§îÉœ0ú!¹ô¢F2qÊ ç`Ÿ7^|&ï ŸY™Ì¯×+wt Ï?3V*æ-ç½³ç@ivYc9,ïôÚùý›Lyr¤ìڹ˙qÕ™Ìñ–%¥äÉáJ ‚ÿÐJ$ž#þd‡Ÿ?~䆀ÀdÒ'xsÖÂÕ@ö¨¬^³VŽ=æhgæ²Ñyg‹Îhö¾{˜ìعSÎÈ;AöœõeÒs/ÈËÏ=)û÷ïwN—Õ<ºLöôSkË Ç—Òd¦ªKO—7ëXú°Ò²â“Ï¥fõeøþR«fõ¤§Ë¾4{®Œ{ê/²}ǹ´Éù2,ïpŸÝ»vË]}ï•/¾üÚ9ø§íM-ä¡QãdèÀ^ÒìÒ‹a ¿@0RÌdÒJ"ž#þT‡Ÿ?~ä†&ÓÂ>ÀÒ¿âEU“ùÞû8ŸHáÊž€Íü3Ù÷‡8åÀdÆI-;beéÏá0z²ÍÏaÆL=&÷e2“™{ Àôª@›Ea6ßœ³Àà9©Íü3ž»E,2š®/ÿÄ¢e$‡˜`2³ê0&¶ù9̘ɰÄ,&3f‚¥ 
×ôªtñ‡õ{ýDÊO7åh[/¬*¬.×f~Œ«».Ëeí–×ÈÖñÂ\¼,6?G¢èˆ6ócÌDу¢¯“=óPkdé_¨x)ÜBŒ EMh’éD`&Ó¾þÇÒ?û4¥Eá`Ì„Ë7W¥c2sEžz!@ t˜ÌÐS @à ˜L: XK“i­´4 € &€É4X/¡±ôÏ 5ò”dŒ»ÕÇdÚ­¯‰­c韉ª“É3&«ã=6L¦wvFæ4ý…ÊHhU¢ 0fì–ßt}Ù“i_ÿãû4¥Eá`Ì„Ë7W¥c2sE>¤zM¡ ©Ù Ï3žÑÅ"£éúb2cѲ ’æ¬p‘˜±³`2-Ó•¥– JsB'À˜ qN+Àd扬œ¥%RvíƒcÆ<ƒ³b2 ‡Ð @ÀL¦?~ä† x!€ÉôB<€  ˜ÌXÈD€ `L¦e‚²ôÏ2AiNè3¡#Îi˜Ìœâ/‘•³ô¯DÊN£}`Ìø€gpVL¦Áâx Íô*/m"Â$À˜ “nîË6]_þÉ} :1 š(åÙN€1c§Â˜LËt5ý…Ê2Ü4ÇŒ DLÑÓõÅdÚ×ÿxa¶OSZ.ÆL¸|sU:&3WäCª—¥!¥Xk 0f¬•Öi&Ón}MlKÿLT…˜L&À˜1Yï±a2½³#' N“i¸@„@VÀdZ)+‚ f2é€ ÜÀdæ†{hµ²ô/4´l)ÆŒ¥Âþ§YÌdÚ­¯‰­c韉ª“É3&«ã=6L¦wvFæ|pì YµfƒÜpuciuÕ…NŒú=ëÍ¿9ÿÍÏá@(<.N«]]vocäx&(ÿ0™þRBvô…yؘérH^¶écïÎϬ}ѽøùp€ƒ;.vo+§Õ®–Ý`#µÑ0™FËCp€ à‡&Ó=òB€¼ÀdzãF.@ˆLf D"D@°Ž&Ó:Ii ¸0™ô@€@ô0™Ñ3§F@ˆˆ&3"ÐT@H €É¤;@€€µ0™ÖJKà @À`˜LƒÅ!4@ðG“é¹!@^`2½P# Ä‚&32$ XF“i™ 4€ `2é € è `2£gN€ LfD ©€ @“Iw€ k `2­•–†A€€Á0™‹Ch€ à&Ó?rC€¼Àdz¡F@ˆLf,d"H@°Œ&Ó2Ai ÀdÒ @ÑÀdFÏœ!@ "˜Ìˆ@S  @ &“î@ÖÀdZ+- ƒ ƒ `2 ‡Ð @ÀL¦?~ä† x!€ÉôB<€  ˜ÌXÈD€ `L¦e‚Ò@( €É¤7@€¢'€ÉŒž95B€@D0™¦@€@L&Ý€¬%€É´VZ@Àd,¡A€€?˜LüÈ @ðB“é…y @ 0™±‰ !@À2˜LË¥9€ P@“Io€ DO“=sj„ ˆ`2#M5€ ˜Lº XK“i­´4 € &€É4XBƒ 0™þø‘€ à…&Ó 5ò@€@,`2c!AB€€e0™– Js @ €&“Þ@ˆž&3zæÔ@ÀdFšj @ 0™t@°–&ÓZii L“i°8„@þ`2ýñ#7 @À L¦jä XÀdÆB&‚„ Ë`2-”æ@€@L&½€ =LfôÌ©€""€ÉŒ4Õ@€`2é€ `-L¦µÒÒ0@0˜&Ó`q €üÀdúãGn@€€˜L/ÔÈ@± €ÉŒ…L @–ÀdZ&(Í ˜Lz @ z˜Ìè™S# DD“hª $ÀdÒ @ÀZ˜Lk¥¥a€ `0L¦Áâ ø#€ÉôÇÜ€ /0™^¨…gçÎ2qâDyôÑGåûï¿—5jH=äÎ;eˆP#EFI}£¤}]è=óLkÄdfJªä¦cüÚ­=ú¢¯ÝÌm&3ÇÚlÛ¶Mžxâ =z´lܸñ hŽ9æéÝ»·tíÚU*T¨ãh©>[è›-±x¥G_óõÂdš¯Q®"düæŠ|4õ¢o4œsU úæŠ|æõb23ghÊÍ›7˨Q£dܸq¢%ÝU¥JéÖ­›ôêÕK*W®œ.9¿Ï1ôͱ!W¾!°xLf€0-)Šñk‰Iš¾è›H€÷çÜõLfÄìøá1b„<ýôÓ²cÇŽ¬k?âˆ#œ%´}ûö•c=6ëüd—ú†Ë7×¥£o®Ⱦ~LföÌlÍÁøµUÙíB_ôME€÷çèû&3"æëÖ­“‡~X¦N*¿þú«ïZuŸfÇŽeÀ€R­Z5ßåQ€?èëŸé¹Ñ×t…’LJɌ¯vAEÎø Ф™å ¯™ºúE2úr0™!3_µj•<øàƒ2sæLÙ»woൕ.]ZÚ¶m+ƒ ’“O>9ðò)05ôµ»‡ oüõÅdÆ_C¯-`üz%|è¼F‰¾^É™““’+V¬!C†Èœ9sdÿþý!ÕRPl©R¥ä†nÁƒKýúõC¯¯¤W€¾v÷ôµG_L¦=ZfÚÆo¦¤â™}ã©[¦Q£o¦¤ÌO‡É X£E‹ÉC=$ï¼óNÀ%g^Ü5×\#÷ß¿œ{î¹™g"eFÐ7#L±M„¾±•.ià˜Lû4MÖ"ƯÝZ£/ú†M€÷ç` c2â¹}ûv¹úê«å½÷Þó\¢ž«ßƼøâ‹Eo¦cÆŒ‘­[·z.Oã™;w®çüd, 
€¾v÷ôµW_L¦½Úº-cüÚ­1ú¢o:¼?§#”›ßc2â®'Æê!<^®5jHûöí¥gÏž…>O¢S¿ŸéÇlN˜0Aºtéâ%,ò$@_»»úÚ«/&Ó^mÝ–1~íÖ}Ñ7ÞŸÍî˜Ì€ôiذ¡,_¾<«ÒtpÜwß}Ò¡C‡”ùÔlΞ=ÛÙã¹~ýú¬êhÔ¨‘¼ÿþûYå!ñÁÐ×î^¾öê‹É´W[·eŒ_»5F_ô-J€÷çxô Lf:íÚµKÊ—//ûöí˨´ÄÁ±mÛ6©T©RÚ|6lêի˳Ï>›•ÙÔÓgwîÜ)úo.oÐ×·¸äB߸(å-NL¦7nqÉÅø‹RÞâD_oÜâ’ }㢔·81™Þ¸Ê¥û'/¹ä’´%5—ºöØcÅîÕÔO­£è~ÍÄ=™Znº}™]ºt‘ &ølmfÙ,X M›6Í,±‡Tï¼óŽ\~ùårfŸ}f†¾…—¦Û6~³%fç`&Ól}üDÇý™û³»“÷+?#© /ïWsŒòý9Í)“éS‹:uêÈš5k’–¢&±yóæÎïõÓ$º—2Ñ0êÒX=”GžìÒJ}˜&ÎTê‹­Îržyæ™N¶d ¹eÖ¯__V®\é³µ©³ë)¶ƒ–råÊÉæÍ›C«KÛ¾{÷nyà¤OŸ>¡Õ££o^ôµ{ü†:rX8&3‡ðC®šû3÷gÞ¯‚d¼_Ì3Š÷ç`U4§4L¦-6mÚ$GuTÊÔPVªTÉI£Ë_ÝO‘èÿë`Ö¥j0õ[˜nºÄÝofj9º$6q¯fâ,©þ¼fÍšIc)Uª”cn+T¨à£ÅÅgUóqï½÷Ê¡‡*‡rˆŒ?^n½õÖÀëq ÔYÜîÝ»ËÞ½{E×ë6,³‰¾ˆ£¯Ýã7´jHÁ˜LC„8 îÏÜŸy¿ xPåÇûÕÁLÃ|^A³JÄdúÐcΜ9ù³”Å£³”ëÖ­ËÿUQ“éÎrº³j:u¶²zõê…f=Ý=˜E—Ú]Š«&3ÕAo¿ý¶\qÅ>Z\õ·ß~s>›âšËíÛ·;¿<æ˜cäÇ ¤ŽT…}ôÑù'æêÌ©k6Õ|vØaÔ¾èkëø d€Ä¤LfL„Ê2LîÏÜŸm½?ó~%bóûs–·ºX'Çdú¯ÿþòÈ#$-¡C‡2eÊ”¤&ÓÝW˜/ÀÏNÊ'Š÷çpTÍ]©˜LìÛ´i#Ï?ÿ|ÊÜî^ÊTß°ÔY9 )îó%‰…§š M¶g³¸à:uê$“'OöØê’“ }íÖ}íÖ7±u˜Lû´füÚ§ib‹Ð}y¶£`2=êØ¢E ÑÈé.w_f¢ITc©{0Ý«gÏž¢ÿ·lV¿“©ƒMÿq¯ÄüúßzŒ÷âÅ‹EcJwa2Ó:ð{ôÍŒS\S¡o\•Ë>nLföÌLÏÁø5]!ñ¡¯?~¦çF_Ó .>L¦G–Ó¦MËèp÷Û—z¬ÎXꉬºœUñ)îÒ¥¯î¥µ¸K¿¯© éR½*Uª8†Hó%×dÍzë­·äÊ+¯ôØê’“ }íÖ}íÖ7±u˜Lû´füÚ§ib‹Ð}•ïÏñï˜L^{íµ2wîÜ´%èA;j—Ħû¦e²B÷_j=H–j¦[V«V­dÖ¬Yiã%ÁèkwO@_»õu[‡É´SgƯºº­B_ôU¼?Ç»`2}è÷Ë/¿HÆ eõêÕiK)úaYuÔ%´™Ì>º…'~GÓýY¦fµ~ýú¢'ò•+W.m¬$8@}íî èk·¾˜L»õeü¢¯K€÷«øõÆoü4ó1&Ó µ„úÏÔ¤Ñ×$5‚‹“K“Kbüš¬ŽÿØÐ×?C“K@_“Õñ&Ó;»bsf³Q=àªó‹ã Ÿ°ÈfwAXQ oXdÑ7<²¹+“™;öQ×Ìó7jâÑÖ‡¾ÑòŽº6ôšxøõa2C`œÍFæ «ç Ÿ ‰\ú†Ï8—5 o.é_7&3x¦&—Èø5Yÿ±¡¯†&—€¾&«“}l˜Ìì™e”#ÓÏ‹dTX‰†.ýû÷Ï"I½@_/Ôâ“}ã£UºH1™éÙ÷{Ư}š&¶}Ñ7 ¼?O“6LfzF¤˜7ožÜsÏ=òÑGRžR»vmyðÁ¥uëÖ•IAÞ ¯7nqÉ…¾qQêà81™ñÕ.¨È¿A‘4³ô5S— ¢Bß HF_&3zæÔ@ÀdFšj @ 0™t@°–&ÓZii L“i°8„@þ`2ýñ#7 @À L¦jä XÀdÆB&‚„ Ë`2-”æ@€@L&½€ =LfôÌ©€""€ÉŒ4Õ@€`2é€ `-L¦µÒÒ0@0˜&Ó`q €üÀdúãGn@€€˜L/ÔÈ@± €ÉŒ…L @–ÀdZ&(Í9@ uëÖòÊ+¯H©R¥ !™ú¨”-[6c Z¶l):tæÍ›gœÇMøùçŸËsÏ='#FŒÈ:oqüq¹ñÆåøã¤< €)0™¦(A%…ÏßÌ”~ë­·äî»ï–uëÖIýúõe„ rúé§g–™TˆLf D"Äì èCî /”=z¤ÌܱcG9ùä“eàÀùé{ì1Çd>óÌ3ræ™gÊÊ•+¥}ûöò§?ýIºvíê˜Ì%K–ÈK/½ääùꫯ¤{÷îrøá‡Ë¬Y³2ÖÉ\ºt© 4H.\˜q}»wï–ßýîw…ÒïÙ³G/^,-Z´åË—K:u2.„ˆLfT"F›ðü=XÍ¢Ïßï¾ûNêÖ­ëü1üüóÏwþð=wî\Ñ? 
sAÀ˜L[”¤…x}ÈmÞ¼YN:é$Y¶l™óp¯yóæ9¦sæÌ™™LM³eË©^½º|ðÁrê©§ŠE˼ãŽ;Cxâ‰':ù›4i"‰&ó°Ã“]»v9³£z©aýå—_ä§Ÿ~r ®–{ä‘GʨQ£œ¼ 6t~§³ Ó§OwŒ¢êüQ.ºè"yòÉ'ôZ×?ü Ÿ}ö™Ô¨QÙM¼Úµk'+V¬Õ«W;i0™ $Û`2mS”ö˜N€çoúçï /¼ S¦L}·ÐkÛ¶mR¥JÑ÷…Ê•+›.1ñA #˜ÌŒ0‘(n¼>ä,X }úô‘O>ù$i“‹Îdº /»ì2¹í¶Ûœå¶‰×-·Ü"Çwœ >\^|ñEgÖS ¢Æè.—Mf2Õ8êl£Öùúë¯;Ët¿ýö[IœÉÜ´i“3«¿oÔ¨‘ 0@¾øâ ™3gŽ“oÈ!òꫯ:æS—wi|‹-ÂdÆ­£oZ˜Ì´ˆH@ ðüMÿüݾ}»èì¦þ1X/}þêûÃÚµkÕ‚Â K˜Ì\Ò§îÐ$Û¢3”çœsN~½E—Ë>ýôÓÎ’×·ß~;k“©fòÜsÏuLª{©A,W®œlܸQ*V¬èüxÆŒÎòÔ¶mÛ¦5™Ã† sfUuÏä)§œ’_n¢ÉÔVý«èüùóßÿý÷R­Z5Ùºu«3ûª3¨³gÏNÉ“ZW¤àÀdæXª/qxþf÷üÕ?빺'ÓË %®ƒÑàØÀdÆF*͆€×¿¤êÒ=à§èL¦þÕqÚ´iÒ©S'?~|¡=™n\M›6u~Ÿ8“©†O7ô똢Wªå²z€Ö©KhtfR¯.¡éÛ·¯üñ,4“©3•º ö÷¿ÿ}¡*ÔtêÌéš5kdܸq˜Ìl:i­!€É´FJ<Åù£oºç¯>ßï¼óNùøãeÒ¤IrÁÄDa„@f0™™q"UÌx}Èýûßÿvöd~øá‡…öVêL .]Õ“i‹[.« =ÔYÇÄ=™¿þú«”/_Þ1‹:£©—î«Ô}–;w.4“¹cÇÑe³ëׯ—š5k:ËduÙ«îõÔ=šî=ºò믿Î?øGÿúùÑGÉSO=唯ù4~=L@cÕƒ‰ÆŽ‹ÉŒY&Ü``2ƒáH)È”ÏßôÏßß~ûM7n쬬=z´óìç‚€m0™¶)J{^rš÷á‡vf-uS~ƒ œƒqn¾ùféÖ­›³¶¨ÉTã©¿Ó‡Dq§Ëêò—zõê‰.}Õ“ät&Rã¹é¦›òMfÕªUeâĉÎgDú÷ï/<òˆc5¯i>xð`g/¦øóé§Ÿ:KbuVóïÿ»ós]¦ûÚk¯ÉÙgŸíÔ£Ëiß}÷]L&ã¡ÄÀd–ø.€ˆ ðüMÿü}ùå—sôYx=>bé¨ÀdŠ“ÂL! 9},]ºt¡n¿ýöB³zÅ}ÂD¿}©3:C¨R¿ƒ©³Žj0õ»›j2{÷î-eÊ”‘}ûö9ßѼþúëeäȑΌcÑë_ÿú—“ÿý÷ßwÒêÒÖK/½´Ðé²úÍK5–j/¾øbg&ò›o¾‘þóŸ¢1ꌦž<§ßÔÒå5zšíyç'gœq†clßxã g™¯šW5œºôFg@™É4¥GG®`2sEžzK*ž¿éŸ¿ú,W“YôÒ³*UªTR»í¶Œ&Ó2Ai ÀdÒ @ÑÀdFÏœ!@ "˜Ìˆ@S  @ &“î@ÖÀdZ+- ƒ ƒ `2 ‡Ð @ÀL¦?~ä† x!€ÉôB<€  ˜ÌXÈD€ `L¦e‚Ò@( €É¤7@€¢'€ÉŒž95B€@D0™¦@€@L&Ý€¬%€É´VZ@Àd,¡A€€?˜LüÈ @ðB“é…y @ 0™±‰ !@À2˜LË¥9€ P@“Io€ DO“=sj„ ˆ`2#M5€ ˜Lº XK“i­´4 € &€É4XBƒ 0™þø‘€ à…&Ó 5ò@€@,`2c!AB€€e0™– Js @ €&“Þ@ˆž&3zæÔ@ÀdFšj @ 0™t@°–&ÓZii L“i°8„@þ`2ýñ#7 @À L¦jä XÀdÆB&‚„ Ë`2-”æ@€@L&½€ =LfôÌ©€""€ÉŒ4Õ@€`2é€ `-L¦µÒÒ0@0˜&Ó`q €üÀdúãGn@€€˜L/ÔÈ@± €ÉŒ…L @–ÀdZ&(Í ˜Lz @ z˜Ìè™S# DD“hª $ÀdÒ @ÀZ˜Lk¥¥a€ `0L¦Áâ ø#€ÉôÇÜ€ /0™^¨‘€bA“ ™€,#€É´LPš@0™ô@€@ô0™Ñ3§F@ˆˆ&3"ÐT@H €É¤;@€€µ0™ÖJKà @À`˜LƒÅ!4@ðG“é¹!@^`2½P# Ä‚&32$ XF ßdZÖ.š@pL{·±$ô!Ì@°‘Àÿ‹C÷ 
ǤßIEND®B`‚patroni-3.2.2/docs/_static/multi-dc-synchronous-replication.drawio000066400000000000000000000046211455170150700254040ustar00rootroot000000000000007Vtbc5s4FP41frQHgY3tx9hO2u5kZ9Km7c70xSODDGxlxAoRO/31K4FkgwS2k+DGTZxmpuggjsS5fOci0nGmq80HCpPwb+Ij3LEtf9NxZh3bBn2nz/8TlMeCMhyOCkJAI19O2hHuo19IEi1JzSIfpZWJjBDMoqRK9EgcI49VaJBSsq5OWxJcXTWBATII9x7EJvWfyGehpAJ3vLvxEUVBKJce2cPixgqqyfJN0hD6ZF0iOdcdZ0oJYcXVajNFWAhPyaV47qbh7nZjFMXsmAf8b7fdj9cAuzc/wOTz92/W3Y+vXafg8gBxJl94NrXlftmjEkJCopjlghxM+C9fZ2p1BvzOVIx69kAj6ONhlQDMkeBRJejjYZUAdPZAWx/oGywRjFGFvaWtb5U2yH+dCckYjmI03ZqcxYkBhX7EVTElmFBOi0nMpTcJ2QrzEeCX6zBi6D6BnpDqmrsLpy1JzKTRA1uNpeAFV27WDPK1qOSRawLR6wdUKKSYgzFM0mixfYoiL6Np9IC+oLRgLqjcABNxvdoEwld7cJ32ewElWZJv/xNfq/bunF/OPUwyXzBhlPxE6iU7tsP/3QiDmywjjLWXf0CURdyXrnAUCN6MiKWgHGG0ZIIjl0gUB7f5aOZYUgp1S/gwDZEvX8e0f+kSYlW0KZGkP3xAZIUYfeRT5N2+JX3zUY2L4Xrn6Y5y37Dk5I56Dkp0Cbasdw7IL6QPPsEf3b7hkHeQizyODKeskbehnMGVOx25ZcmBRrXopqcpYcuqzoxLVm6qZS/wHK0roOvKNnQF6nQFHPtEunKAoavrr9OZCEY4S7mX8itgqC39iZgXShmX5Fax7VzGqQYVJX13hAmKnzqlL/MfBRYl2O5ZVg7EPdfRAFpisEYd11ILLNdmjhsYD/On+f0aJkCj2SNtbg62ylhv4QLhO5JGLMpxakEYI6sSnHhIIGOjeevo9zNbIBojLuZelCPfJFEQyBXkn8yM7aoZ25aJOaMaM+6PXm7F6ch2HA96nxfjq1/hp+zT9C+vaxpxx3axAGY/euCXAcuduiAtqE7ha9bMy0klq3f/y0Sak2NKtwhJV3yCm2yKh+TtFy1XJYmVjtkCcA7s4WhG/bYYDdpidEi8RzMatsVo1BajcUuMuIu3xAi0xchui1Fblm03WfZdySsLZoazvmtyBZb0NCCP2qqktKu5gB6rlpisvRBS1vMhgwuYooY8rCEaHRvImqNWvxq1RMWlJ8rD3sAMW4MWEuXasGXvD1tHQEhbftbI6O6DeIk4ZTDmOnqatZzZq7TGKKHRCgr175VGbdx/hpNxL2BVzzqYLK4i3xeP8xqavw7c1dTVZFpkpjBjRJbXudNW8nBZkdUUaaWS3+6f0mfBoOKzwDEzzXFdpnmq2tYsbWeiBig0yYuduNaycpjrylpI2FZCUZON8uJnXLWOgm2DeVzaWu+6reURmhAKGZqLqDrfQkJzF+V0LS6zUdNai+vMOlxgbHa46gLlLgZQBHk5blGUYC7p/Q2VWhtuaic22PZxLc5yo6WitIMNi/0mszXDkosdcieYJsXbLqMN8pt8gkc0klEPFR4hAlydb/iLuVLFXEh+ruR+dGuv/1Qb3UYmaaSu2dpza2zUPZWJqu1cGnvvt7H3dCN+xcZe/VGCWSK9zaOEp6vq8LHPbz5KAGYXdjZ1LgnrJWF9F+ewQ/fsstThJUt921kqeLKVnl2a6hpWhvwAKZETykISkBji6x11woEk9rdWsJtzS4R+cz3+ixh7lIoUPa5nBWUl3kKZ+95CSlFsfa8WKMKQcSytYEydUOWjdwKid9rrAg1kOMxUeTBIA8TkY5putvt4gbrM
fOxSVbyzquIZoHN2ZYX7TsqKZ+jq7D5RskfnESLUl7s5wu6T+fj3BIOBFsoNDRRh63SxwEwvVUYZOnlEgGmxE3XKwSKGUQsHc219pnDwqLDxQCdntJbmL1jFhK4grmV2/xh7IccWkqV84pcix8sRvemI55lH9ULqDadEwhbzaNI52UnikYikvLnmrFB+/i5X6ZS/MK9Dqi4P7mPHqfiA+hLsua6lppDlMkUn8Rp7/Ieh2fA3odn4ldFM2WUDmh0GE/MrrBNCYFuffLXH6ALKLZD/DAhXp58vhnCO4CN3WEVw59wR3DF72q+J4IebE9aRUK/+FqA9qH9Za6j/h8n52JCqFHIuch68VTm33pVrkDMf7v4EsoCZ3R+SOtf/Aw==patroni-3.2.2/docs/_static/multi-dc-synchronous-replication.png000066400000000000000000001011261455170150700247010ustar00rootroot00000000000000‰PNG  IHDR-^{ø ztEXtmxfile%3Cmxfile%20host%3D%22app.diagrams.net%22%20modified%3D%222023-03-13T14%3A25%3A38.635Z%22%20agent%3D%225.0%20(X11%3B%20Ubuntu)%22%20etag%3D%22qRkI2CaWlAzzNtbhExOR%22%20version%3D%2221.0.6%22%20type%3D%22device%22%3E%3Cdiagram%20id%3D%22SVgELWPNXIlR7V7eDs_m%22%20name%3D%22Page-1%22%3E7Vtbc5s4FP41frQHgY3tx9hO2u5kZ9Km7c70xSODDGxlxAoRO%2F31K4FkgwS2k%2BDGTZxmpuggjsS5fOci0nGmq80HCpPwb%2BIj3LEtf9NxZh3bBn2nz%2F8TlMeCMhyOCkJAI19O2hHuo19IEi1JzSIfpZWJjBDMoqRK9EgcI49VaJBSsq5OWxJcXTWBATII9x7EJvWfyGehpAJ3vLvxEUVBKJce2cPixgqqyfJN0hD6ZF0iOdcdZ0oJYcXVajNFWAhPyaV47qbh7nZjFMXsmAf8b7fdj9cAuzc%2FwOTz92%2FW3Y%2BvXafg8gBxJl94NrXlftmjEkJCopjlghxM%2BC9fZ2p1BvzOVIx69kAj6ONhlQDMkeBRJejjYZUAdPZAWx%2FoGywRjFGFvaWtb5U2yH%2BdCckYjmI03ZqcxYkBhX7EVTElmFBOi0nMpTcJ2QrzEeCX6zBi6D6BnpDqmrsLpy1JzKTRA1uNpeAFV27WDPK1qOSRawLR6wdUKKSYgzFM0mixfYoiL6Np9IC%2BoLRgLqjcABNxvdoEwld7cJ32ewElWZJv%2FxNfq%2FbunF%2FOPUwyXzBhlPxE6iU7tsP%2F3QiDmywjjLWXf0CURdyXrnAUCN6MiKWgHGG0ZIIjl0gUB7f5aOZYUgp1S%2FgwDZEvX8e0f%2BkSYlW0KZGkP3xAZIUYfeRT5N2%2BJX3zUY2L4Xrn6Y5y37Dk5I56Dkp0Cbasdw7IL6QPPsEf3b7hkHeQizyODKeskbehnMGVOx25ZcmBRrXopqcpYcuqzoxLVm6qZS%2FwHK0roOvKNnQF6nQFHPtEunKAoavrr9OZCEY4S7mX8itgqC39iZgXShmX5Fax7VzGqQYVJX13hAmKnzqlL%2FMfBRYl2O5ZVg7EPdfRAFpisEYd11ILLNdmjhsYD%2FOn%2Bf0aJkCj2SNtbg62ylhv4QLhO5JGLMpxakEYI6sSnHhIIGOjeevo9zNbIBojLuZelCPfJFEQyBXkn8yM7aoZ25aJOaMaM%2B6PXm7F6ch2HA96nxfjq1%2Fhp%2BzT9C%2Bvaxpxx3axAGY%2FeuCXAcuduiAtqE7ha9bMy0klq3f%2Fy0Sak2NKtwhJV3yCm2yKh%2BTtFy1XJYmVjtkCc
A7s4WhG%2FbYYDdpidEi8RzMatsVo1BajcUuMuIu3xAi0xchui1Fblm03WfZdySsLZoazvmtyBZb0NCCP2qqktKu5gB6rlpisvRBS1vMhgwuYooY8rCEaHRvImqNWvxq1RMWlJ8rD3sAMW4MWEuXasGXvD1tHQEhbftbI6O6DeIk4ZTDmOnqatZzZq7TGKKHRCgr175VGbdx%2FhpNxL2BVzzqYLK4i3xeP8xqavw7c1dTVZFpkpjBjRJbXudNW8nBZkdUUaaWS3%2B6f0mfBoOKzwDEzzXFdpnmq2tYsbWeiBig0yYuduNaycpjrylpI2FZCUZON8uJnXLWOgm2DeVzaWu%2B6reURmhAKGZqLqDrfQkJzF%2BV0LS6zUdNai%2BvMOlxgbHa46gLlLgZQBHk5blGUYC7p%2FQ2VWhtuaic22PZxLc5yo6WitIMNi%2F0mszXDkosdcieYJsXbLqMN8pt8gkc0klEPFR4hAlydb%2FiLuVLFXEh%2BruR%2BdGuv%2F1Qb3UYmaaSu2dpza2zUPZWJqu1cGnvvt7H3dCN%2BxcZe%2FVGCWSK9zaOEp6vq8LHPbz5KAGYXdjZ1LgnrJWF9F%2BewQ%2FfsstThJUt921kqeLKVnl2a6hpWhvwAKZETykISkBji6x11woEk9rdWsJtzS4R%2Bcz3%2Bixh7lIoUPa5nBWUl3kKZ%2B95CSlFsfa8WKMKQcSytYEydUOWjdwKid9rrAg1kOMxUeTBIA8TkY5putvt4gbrMfOxSVbyzquIZoHN2ZYX7TsqKZ%2Bjq7D5RskfnESLUl7s5wu6T%2Bfj3BIOBFsoNDRRh63SxwEwvVUYZOnlEgGmxE3XKwSKGUQsHc219pnDwqLDxQCdntJbmL1jFhK4grmV2%2Fxh7IccWkqV84pcix8sRvemI55lH9ULqDadEwhbzaNI52UnikYikvLnmrFB%2B%2Fi5X6ZS%2FMK9Dqi4P7mPHqfiA%2BhLsua6lppDlMkUn8Rp7%2FIeh2fA3odn4ldFM2WUDmh0GE%2FMrrBNCYFuffLXH6ALKLZD%2FDAhXp58vhnCO4CN3WEVw59wR3DF72q%2BJ4IebE9aRUK%2F%2BFqA9qH9Za6j%2Fh8n52JCqFHIuch68VTm33pVrkDMf7v4EsoCZ3R%2BSOtf%2FAw%3D%3D%3C%2Fdiagram%3E%3C%2Fmxfile%3E\4»" IDATx^ì ¼MÕÇ—©Á¬Q(Dš¨„"„ eîo*C’1B†ŒÉ˜9d®e® QKå*¥îùå×ýÔµ÷ ú~Ûºöš¬ôr›æôh…‡"²7“€î"‚.ñùëo¶PïÃi¯§oÎ{õêÚžŠÞY$˜ƒ½ˆˆ€%Á‰å ,"ôl߈n¼á::c¼Œnزƒ¦|°„º·mHùnÌ¡:3ô­Y”ÖêW/OY³d¤Í[wÑ„é !£åý÷žýÓÈÉs©R™{©Béÿ^p#¢›@G@g¡qƒ:Ê_ÓÞ›u™ßƾѦBÂW6‡õi$"ÂSGSÁü7Ó©¿NӲ嫩çëÃhúÄt{Ⴊþæm»Rºti©S»”ýºkiåë¨KÏôî„áTĸçÑš¨Z•Jô\£ºôYÜêØ½?}¶`:Ý3{Xûp€@0 è,"p\åÌ0éñ¹PÁ[¨ô£u¨×+í•è;ùèƒ9Óç¿ÌA‡^ƒDD ¬ˆz"Ã;~¡Ù WQèr†ÚUÊÐmÿf ¤tRC¸CEÓò>Ž£ƒ¿¥ÖMkgL˜þ ½Ñó#S!u|çæ.^CW/®U+” %qëéó5éðÑJh€ˆÑÀM h:‹ fL¤jõžKÒ™ŒŒ„fN¤?ÿ<©~?{Áâ$_fùwшfcoŒžH{öþJ#ö¢5k×Ó+F–Ág½GéÒ¦·gÌ[oÓW^A5ª>BåªÖ¥-_,V™f|U7ìnÕ¬1UzøŸe¸@@ 1E]âsµÊ©ý+}è½I#þ#GQ©ŠµèûuKUæ.HŠ@X!©C—3„VšÒ&‹VD„m»öÒ[,¼Ú‚XP8c,whT§RXOŽž2Ï6òBDK 7€è*"ÜïÝTâ¾b4jüÔdXâ¾¢´uûN:a|Æ™ #ÆM¡m?îºì~+"§¿vé5Pe° pÒXîðjç6IÚrþüõbšýúkÕïùÛ¹²UêÒ§  €@²ttŠÏ&ü¿ÏŸ§ï¶n§ —ÒÑcÒð¯bd‚€@Šñ9UƒVý/%w‡) _ùõz}8-3…+Ò¥‹¯·ï QÆRŠ4¨oWê?t mþö3´]wí5|  
–€n"‚Žñ¹–‘¡öñâϨ_Žñþ(W¥ èÝÉ8çž°> Á$±ˆàær†sÆQŽ,L|ŸO^øït†cfÐUÆ&] j>LY2eP'8Lxïu‚Cþ¼¹â=!˜ƒ½+txÂÏ™‘œº^x©»%጑qÀ¢A·¾CÔÉ æé M^èhý{5½Ò¡¥!dS'8ðf‹|‚C®œ9èáÇë+‘![ÖÌñí¦56aLú¿r­øe@üK@7AÇøÌÙÕn¬ö@(eÛ»hiœ:}'náLÊ’9“z ˆE7–3œ5–+¤6Î!ç³Èyß…ºÕÊS‘O~à^ž66Vœ¹`9}c 'O¦\9®¥ÚUËP±Û $€!¦1 (º‰|¬ã¾ýhÙŠ5–ýÄ“ÿh3Μ>C©Œ êÔ©¨pÁüÔ±M3ã[ªÿŽÑ凌|Kßxüø º%ßMÔö…gÔ‘a+¿øŠžiùòeöŽ5–eT*_Úr?P@ÀßttŒÏ<‚âÖ¬£¡£&Ð/¿î7ŽLÏeˆÁ/&ˆïþe耀‹‘TËr†HêÇ=  `7ÝD„ܹrÒÀÞ]¨…±ÖÉ“§¢ÆÁé¶·*H¯ _6Ür†¨A°€n"â³ NG ZˆXDp:A Z0@ÀwtØ·* „>!Ú‹xì7xT‚b¢¥ˆûAÜ  ›ˆ€øìƨ@ @DàØ àE»aAD°›(ê°ƒ€Ž"‚ý­ñÙn¢¨@À‹‘4†å ‘PÂ= ’@D ÂKª¤ [@LŸñ4€È$VDÞ«%ý~äxDÖ_Mj×ëÍ$ï­U¹4Õ®R&¢zp€¸E"^RÝkh@ :Ÿ£1¸@À-aE» ˆ`IÔ `'ˆxIµs<¡.û@D@|¶o4¡&; @D°“&êÐŽD¼¤j7ha0„DÄç€ ut´#A;—Á`; @DÀKªã uØG"â³}£ 5ØI@‰õ[õ¿TÛØ³ÀÉë¶‚y©HÁI—n_@‡ º  àˆ.@F r @DëX lÒ'éÒí öèAïAœ$ÁIº¨@@<ˆâ]AJ@ú$]º}6è6€€  "¸M€È%A®o`€@° HŸ¤K·/Ø£½p’D'é¢nñ "ˆw (é“téötØ Û .€ˆàd4 —D¹¾e Á& }’.ݾ`ô@ÀIœ¤‹ºAÄ€ˆ ÞE0@  ¤OÒ¥ÛÐaƒnƒ¸@"‚ Ñ€€\äú–›€ôIºtû‚=zÐ{' @Dp’.êO"‚xÁ@€>I—n_@‡ º  àˆ.@F r @DëX lÒ'éÒí öèAïAœ$ÁIº¨@@<ˆâ]AJ@ú$]º}6è6€€  "¸M€È%A®o`€@° HŸ¤K·/Ø£½p’D'é¢nñ "ˆw (é“téötØ Û .€ˆàäh›øàãU4ÉjU¬v•2T«riõï9‹VÓì…«ðsp0}íØÀýö€ˆ`?SÔ v>I—nŸ>@  ˆ à2é“V—qxÞœtà%Õó!@<" =þI·Ï#·¡Y€ˆ'£‹²HŸ´Ê¢å¼5Òý—TçÇ@4- SÌÿ™qÈ‹æ‰pö^éñOº}ÎzG^íˆÏˆÏòF¥-‚ˆ з¼l/1c“Ið¯M mª"‚M Q ø€€ôxàÄQuAú$]º}QÁÆÍ œâ³,ADåe NI¾% ýyÃKªo‡:&€ôx ™£&IÒísÔ9¨\&€øì2ð0ÍADåˆý“üM@ú‡^Re?dÉò‡ÝÖÀ¿v­>éñOº}±Ñׯ4ž_ý|Åðo4´œ¿"‚óŒ£nIÔÈ´*ÿÊrDYþnôñ"ìhHŸ¤K·/Ö~¸ñÙ^Dt!AOÁNßÀ‡œ,WJ÷^R1^d€5 àéñOº}îyJFKÒ?ÏeP‚ `ˆöp d-#&Í¥¯7o£T©RÑ¥KD™2\M÷+LêT¢4©S'Ëä‹õ[û/уÅo·…ÛÎÝûiê‡K¨ßËMm©ÏéJð!ç4áèê—FçO§ïF&‘Ó„½­þõ–âÖ¥Ç?éöÉò¦óÖàùuž±—-À¿^Ò¿¼mˆ²ü¡¬Ñå!a¡@¾\TµB %"ìÚ³†¾5‹jW)CKß“,ÙyKÖÐÅ‹—.;âïó(]Ú4Q{äô™³´ÿàaÊŸ7WÔe½( ‹½`ãE›¼ Ž6A@&éñ@&5笒>I—nŸsžAÍ à>Äg÷™§Ô"DYþPÖèò„Š&ÆñÓ?¡´iÒгõ£%+ÖÓÂå_ÑÉS§)wÎëè…§§C‡Ó¸w>R·W{ä#‹èØñ“´÷Àïtý5Y¨ñ“ЬOVQÜÚÍêžÒÅï ÿ=QŽR§NEÍ» §>@ ?ÿŠÒbCåò÷Ó#ÝK?ÿòMš¹H›LC.Ð&IÞð’èái¹óȳ†Nz<°Ö+}KIÒíÓ×óþ¶ñÙšŸ­qsªD§ÈÆP¯.Ib'óCÆ@Õ*=@÷ÞU:öO}%9³_CïÌ^ª²X\ÍDX¼âkš³p5µoV› çÏC«¾ú–ÂC—ëQjCa8v&=ü`1zø¢JD(R0/µjZvüô+½6ê=š<ôeúuÿïboA/*ýyÃKª¬ªK&2Ŭ]ük­wú•’ÿ¤Û§ŸÇc³X—çñÙšŸuñ¯µÞéW "‚@Ÿéòp\¿e»Úÿࢡ¤K›–J{"<[¯²Úóà¯Óg)s¦ôtò¯Ó4{á*:qâ/C¨q™ˆ°õÇ=ôR³:ÊCÆ}H%ï½Me ðµö›hÅ—›•¨À"‹«Ó]·Ý¬~×â•4¤{3úÝÈnÐ)Aÿ |41 
"‚#X}[©ôñb‚G¦˜o‡` :&}’.ݾ@ £³ˆÏÈä Ú˜÷²¿¼¤¯yÛI½¤š]ºpñ"͘¿\‰Y3g¤«®LGY2eHRD8pè5ýߣªh÷ÁS詚¨p<êÿ¼iâ„÷>¡]ŸW"ÿ5sõ»–]GÒ nÏk'"èò!§ùðŒØ|éþÀKjÄ®tåFéã%9™b® 4b3éñOº}6»C|uˆÏÈä?H}d D9Óí®¤$"ð Ÿ|¶–^iUŸ2§6¬þê;ÚôýÎ$E„ƒ¿U{!ðÅ™¥î-rÃW›¶Ñg«7ªzXD`рňn{Û¿íIéÀKª¬±§K&2Ŭ]ük­wú•’ÿ¤Û§ŸÇc³X—çñÙšŸuñ¯µÞéW "‚@Ÿéò¤$"|·¾6–:tiYΞû›û@MþÛ=WK-g8söoªW­ñž¡"BÜÚ-´$n=u5D>:rà›3è¡wQÅ2÷øFDÐÅ¿ GL‚ˆàVTê1dŠYs€ôx`­Wú–’>I—nŸ¾ž÷·åˆÏÖü‹øl›S¥ "8E6†zuyHR ‚¼ˆIsè—ý‡(çõ×(`úœÏ¨iÝG)Cú«iÌÔyTåá”&Mê"ï¥ðáÇ+iåº-Šà÷ÝNu ±÷]ðK&B CE ýyÃKªN@•ȳædéñÀZ¯ô-%=þI·O_ÏûÛrÄgkþE|¶ÆÍ©Rœ"C½xHb€‡¢ %éÏ^R£t¨Ã·ë’I„L1kAÿZë~¥¤Ç?éöéçñØ,ÖåùE|¶æg]ük­wú•‚ˆ ÐgxH:ÅF“à_aÚPD ¨ éãÅt2Å4(}ÜUé“téöùxh$Ù5ÄgdòmÌ{Ù_ˆ^ÒGÛ$ Ë‡\Pœ#ÝxI•5¥Y´` ÄF@zü“n_lôõ+ø¬ŸÏ`±¾ "èë;X®)|ÈÉrœtà%UÖxA&‘,Øm ük7ÑØê“ÿ¤Û}ýJãùÕÏgÑX ÿFCËù{!"8Ï8êðDL«ð¯,wADåX^¼dãEÛÒ'éÒíóÂghœ"€øìYkõBD°ÆÍÑRxHÅ‹ÊA éÏ^R1`AÀ=Òã{$d´$=þI·O†aØCñÙŽvÕÁ.’6Öƒ‡ÄF˜¨ Âþ¼á%UÖF&‘,Øm ük7ÑØê“ÿ¤Û}ýJãùÕÏgÑX ÿFCËù{!"8Ï8êðDL«ð¯,wADåéÖH/ÒùÁ>ˆ†€ôIºtû¢aí‡{ŸýàEôAtñìô |ÈÉr¥tà%ãEXîÿ¤Ûçž§d´$ýó\%Xö€ˆ`GÔÀ‡\Ĩ\¹Qº?ð’êÊ0ˆ¸dEŒJËá_Yn“ÿ¤Û'Ë›Î[ƒç×yÆ^¶ÿzIÿò¶!"Èò‡²‰@§Øhük#Lª‚ˆ`DT>! =øsÄÝ>I—n_Ä q#h@ñY–“ "Èò‡²ÆÏÉÖm;èÄÉSô牓ôÃöIÒ¿­PÊœ)#™ tLòéÏ^R}4ØÐñ¤Çñm6Pzü“nŸÍî@uÀû³¿çG-ËMCD°ŒÎ¹‚~y‰Y·~ýóg#ýºÿ7Úgü‰öÊdˆ E Q…÷3þU.°‹€ôç /©vyÚžzIdG©µÀ¿²<#=þI·O–7·Æ/Ï/ÞŸ“+~ñ¯óO‚;-@Dp‡sT­èüÌY°˜–._eüY}YŸ ßš_ 7æÊI¹sÝÿ{3ë€38C¯}û(áa«ñ³“FæBèU©|iªT¾ ÕªöXT\¥Ü¬³¥0´ÓˆvÒô]ÒÇ‹ÿ=€‰€ôIºtû‚4V¸¯:Çg¼?m´êß_ˆúûPD8øõ<ŠNü+äº!•4²8{€³Š.hÙÎ_ Aá‡m;•8±ÖÈlØà ª‹³š6|’Ú´hb¹n/ êü!ç/§Û”:=¢«_úx‰®7¸dÿ¤Û'Û»ö[§c|Æû³ýã5ºC"‚;œ}Û g4|®müþµžxÔȨL%‹s¬Ïk¿ÞHs,¢9-Qm°H1°w—˜„ ÇŒM¢b?äÜ䃶@@2dIöNì¶Á¿±3´³é“téöÙé êÒéùÅûsô#J'ÿFß;ýJ@Dè3xíÖý÷ÞMƒú¾b,Uøo™‚Óh9C¡ß Q´lÅ%$|4s’ÓMÚR¿Nþµ¥Ã¨@4!‘W–£¤OÒ¥Û'Ë›°&”ÞŸ£ˆÏÑ3s²D'éZ¬[§‡„/]X¹è‹½½X‹v]•ðî„áŽf@Än)j™Ž;F›7oVÆñ¿7mÚ¤þ5kV*Z´¨úw–,Yâÿ-³±[¥Óço콕_ƒôIºtûä{8¸âý9zß#>GÏÌÉœ¤k±n]>n¦Z½çŒMsª,/NMàt°'ê>«N~€ˆ`qÀ¡€@ÄüI´{÷nš?>Í›7V¬XqßCoÌ—/•+WŽjÔ¨AÕ«W·T‡ÄB~ð¯D®Vm’>I—nŸUÓåùÅû³µ¦‹­õN¿RúL—‡„÷&xêùvŠ /';¬Ÿ«Ë–~¾Šú t¤."‚.þøhÀ$ðœ€."obPqqqJ4à?,"$¾B³ B³B³B³—g1ÿ”-[–X`Àv>I—nŸ>Щ]â3ÞŸuU°5906, ‚f%|ì"¿ÈÇ0:qñ>_­ßL#ÆM‰x9ŸØ ‹ˆ Ë‡œþC  ;ÝžßiÓ¦Q¯^½,˜“~Î&`Ñ Ú‹—;pÃÔ©Sã—A˜u4iÒ„zöì 1!Z¨¸ÿ2Ò'éÒí 
ÚÒ%>ãý9h#ÓŸý…ˆàO¿ºÒ+3ò¦ŠµøˆÇ“'OÅ·]"äˆÇÜÆDßÊ1¦h°uûµãÛwÆ×Ïí¶iÑTý|Ôø©\ñ:`Ð%“ˆ'ù5kÖŒî¾ûnµü€'øævy’3Ì,Ž?®ªn×® 6Ì®f\©GÿºC@#Ò'éÒíàBWMÐåùÅû³µa¡‹­õN¿RúL—‡$4¾7i¤"ÉK –._EŸ._@PÅÌKÂퟰÕ Nû$¾ ßšŸJÜWŒš>U'~éĈ±S´tñ¯ÀG&D@€³Ú·o¯6HÌ›7¯ÊD`ñÀ‹—Jp{œÁ sçε”íà†½‰ÛÐå›L/Ø M{àýÙGÄgkÜœ*Á)²1Ô«ËC’T í6oÙ¿›®[¿Qýê« ÿìþÉÅ‚‹ ,1„‡Û HrÏÝD„HúŽ{@@À Î(_¾¼*Ú¶m[>|¸•jb.Ù¼dbÏž=JHX¾|yÌuºQ.Ÿ¿n°@ ÎÀû³5®ˆÏÖ¸9U "‚Sdc¨W—‡$\Œ×z•,^,’b îˆ52°H@z&ÑÍ7߬–0ðžœàåÅ™¼É"/o˜2eŠkÙ±ôYºcéÊ‚€ß èòüâýÙÚHÔÅ¿Öz§_)ˆ}¦ËCbG´¿n"‚.þµÃ7¨üF@²Èkf!ðþœ áâ½xo>µÁêq’ú@äŸCéáýYþX‚…á @DÏw$CAÐÚÐÐåCÎZïP üM@òóûꫯRß¾}©iÓ¦4yòdŽàl„lÙ²QÆŒ}nNˆ° F€ø“€äø ÁŸc.Ƚ‚ˆdïÇØwˆÖêò!g­w(þ& 9“ˆ÷ ˜?>IÊDॼĂ¯K—.‰’ý+  èòüâýÙÚ@ÑÅ¿Öz§_)ˆ}¦ËC‚ hmðèâ_k½C)¯´hÑ‚ÆOW]u8p@ĉ¼/CïÞ½µ òz5zÑ.‡ÞŸ­ùñÙ7§JADpŠl õêò Æàd› „NØ‹-ªNDÈš5«Í­D^5ÉK+ÌK‡L]>#÷îFïÏÖ<‚øl›S¥ "8E6†zuyHcp2Š&I`À€4qâD:räÝvÛmÔ¼ysjÔ¨‘­´¾ÿþ{ªW¯}ûí·¶ÖëFe“&M¢çŸžÒ¤IßÜ7ÞH]»vU?·zñšuN;ß¶mµjÕŠ¾þúkKU½òÊ+Ô AºóÎ;©råʪ®ªU«ZªKj!É™D¦ˆpå•WÒÙ³g‰…>ÿvûâìƒÄ§Cè "Hö¯Û>D{  ]ž_¼?[Yºø×Zïô+A ^[+ IDATÏtyH¢ ‚æ½ápW*_šj=ñUz¸L¸[ãïÔé Ë–-£Š+FlG¤7êâßHûc×}ø€:D™3g¦9sæÇ <¶µ\¹rôÞ{ï©1z%øw<1Ë’%‹|ðAŠ‹‹£¶mÛÒÁƒ顇¢±cÇÒ5×\£2¸>æø×_©¶¹MÞ5ßvîÜI¼®ž3.\¸ ·ß~;žkëÖ­éÌ™3Ô¦MZ°`ú7 mï¼ó 2„HÙ³gWÿgñ‡ëâL„ï¾ûNe•lݺ• *¤l*V¬± Ã?Ï›7¯òé-·ÜBýû÷§òåË»=ü"nOòókŠ<þÚµk§Æ;ÿ9~ü¸ê‹üðzì¸X,à1ÇϦù|r½\?ÛÃc™Å)s¬ÚÑ&ê¤8ŸùÝ„³»úõëG:tˆ¾_ߟcƒ ´"A+wÉ2Öjlݼ µ}á¿u²f¯–~¾ŠFŒ›BÛ~Ü¥~Ô­c+júÔ“a;m—ˆÀ“W>"'{üÒûæ›oÒÓO?¶ýhopêC.Z;¤ÝÏߨ?ûì³ÄÕºuëÒwÜ?ùøý÷ß©@Äsvß“3gNêÖ­›šÌóD…/¾øBMÈOžï(o~ƒÊ“í~øAÕ½fÍ*\¸p²í²pÁ"Ä’%KÔXâÉz³fÍÔ$>9Óÿ¹=Erçέläo/QH,"ðKÍÌ™3•ˆÂÀ©S§§>úˆJ•*¥ Îèà ?×Å;÷ó„鍊nRŒyÉOþ“xƒ¾wß}—>þøcúõ×_U}¼Æžž˜ò73Ü?^~ùeµD$4á‰'žP"£>ªÚéÔ©=óÌ34}útõœíرƒ~üñGå ®… ~AûôÓOiÕªUÒ†g¼=’3‰BEs)±È‚OðM1;Ã/Å,&ðRs¹‹QÉ-}àzÌÌþ7gšpüwèÅ'CpÛ¡ÙC:‰’ýëåC¥f)ÓÇR3/Gçm;õüòþ.üÅâ: ÷|”n,b‚ßÞŸÝò¾SþuË~¿µA GuyHì‚ìŠ_÷ rUê)¯äΕ“âÎ ë¡XD„¿ÿþ[M`Lñ€'a|ñ·©üM®—.þu¢ïáêä ú˜1cèóÏ?WÔÏ=÷ñ:{þ¶ü >}ú¨Ikž|Xù‚Yó½:îWnl»ñû¤D„ÐvyÂÏ/Ã,¬™‚@rv±ÈÀþN,$¾ŸÇ‹xf†CR9ê$"@ä½|D`©Yø§KÍÂ3Òýþìûã?T78«ÏX\H—.]TÝóÃûsT¶éfÄg›@ÚT D›@ÚY.‰A96x¶ }µáŸµ¼+ΠsÝ"ÞXDþ¦™']œ–m^üí*«Í¸Ü#ÔZiÎ*ào¸o½õVµ4€ÓÜùœ'­©¾ÕçIîùóçã3"xÎßÊsCrír ¼”€¯uëÖ)1„mL,"ð½ÜGÎpàeümßÇßÞs,X„^Ie"°è2hÐ Ú¿¿úv„Å“…Y–EÊXÎ.`ö¼¿oŠÈë÷yÒÏ“V󔞈ó)æñq)‰ü­;ïÀß ð‰<©å 
yò©ˆÀû&ðæ€,ðDŸ'Ëæ^ɉä‚0ðxrΓWž¨âŠŽ‹¼#/?Àe€äL"ˆÖýj–”ìߨ{g­,5ÃR3–šñèvêùååˆææÍÖž¢„¥üþþl£¤êpÊ¿NÙë÷z!"ô°.‰Õ Xâ¾¢T⾄ûü°}­]¿Ie ðUøÖüôÞ¤‘”9SưrJDÛ°Åtñ¯Åî9RŒ³Ž=ª¾½æL^ã+:¢ã¥ãÝb÷DÞËb©–šé""ÄÜ©ïÏÖ8#>[ãæT)ˆN‘¡^]«A0%4¼„¡iÃ'©éSOF$ p]º‰1 Àåã6yN}çý pEO€—RðÞe—? @DˆÝ¯º|þÆÞÓÈkÀR3,5ƒˆùóÉxŽ„Òå÷ >[ãæT)ˆN‘¡^]«Aðþ{ïN‰°nýÆø#»ul¥„h.ˆÑн ±œIdUDàÓ2ø„“¤.Þ¬“÷±àÍ4ùisCÓhê´±¢dÿFÃÜí{±ÔÌ:qd‰Yg—¸¤.Ï/ÞŸ­ù\ÿZë~¥ "ô™.‰Õ ˜x¯Þ á¡Êÿ£“'OGêe¤¸…3#ÎB`÷é&"èâ_LÏ HycxãMþz={V©ÊŸ6›ŸvèÐ!*?è$"DÕ1ܬÆ–šÅ6 "ÄÆ/´´äøj'ÞŸíó9jòŽDïØkß²]AAÌž¿ˆ:÷ ˜T*_šÆ{-b>º‰º|ÈEìÜ" ùùUDàÓKš7ožÀ›|Ô(ŸÈÁÇrò1œ™°aõ¼(Ò "B¤¤ô»KÍb÷–šÅÎЬAr|vBDÚû³}#5ÙA"‚Z‡"#lðl›øe ïNN%‹'Ü|19Ì:Ñmð€€äL"'DñŸþIEŠ¡}ûö©cIùØÒH/DÉþ”7î ÐåùÅû³µª‹­õN¿RúL—‡Äî ¸uÛªVï9å‘ܹrÒG3'E´¬A7Aÿ |4`€@ œ¸Ù&MšÐ´iÓ¨Zµj4þüˆ}¡“ˆ Ë7™ÃÇ âàýÙšKŸ­qsªD§ÈÆP¯.‰ÝA‘õ4Ц½7KÑK¼wBrHub(  ÉpZDhÚ´)M:U—¾dÉ’ˆ=!bT¸@ ðþlÍɺ̬õN¿RúL—‡Ä‰ ºÉ"»fÁŒ‰T¤pÁ½Aà †I àS’3‰œNž<©–3ìÝ»—^zé%:thÄÖIDì߈ãF(]ž_¼?[ ºø×Zïô+A ÏtyHœ‚ìŽÐMKÜW”¦Oá+Aÿ |4`xN@²È너pîÜ9Ú´iuêÔ‰âââÔé _|ñ/^[ãæT)ˆN‘¡^]sD~AühæDº1× 1ôÚZQ¶¡l•ºtÂøû›UŸDt$¤µ–P @d€ˆ»tùü½§¨@À+x¶FñÙ7§JADpŠl õêô˜K rçÊI{wIqï‚$Y”•Ü׆Œ¦¶ï$¯³!ìîêI@r&D„ØÇŒdÿÆÞ;Ôþ& Óó‹÷çèÇ¢Nþ¾wú•€ˆ Ðgº=$ærFɧ)4iø$U*_Ú1²,Ìùh1ÍY°XµQë‰GiP߮޵gwźù×îþ£>Й€d‘"‚Î# ¶ƒÄJ@r|NªoxŽÕã(ï%ˆ^Ò÷QÛ<±ïôêë´ÿÀAÕ«L™2*!¡ä}Ũð­ù©Há‚–{ûëþF¶Á.Z·~#-]¾šöý{jC®rP—[S¥‡ËX®Û‹‚º}ÈyÁÈÍ6¥ûƒƒôÎMqn"A[)<^ "`è‚™€äøœœ_ðþä«wß!"èí?qÖ/ý|Í^°ˆ–­Xs™m·* ö,(a æÅÿçŸóµný¦øŸÿ°}ñš±ÐŸ™¿¬XîAC (ö˜Hqpþ5HÇ9©,í°Kº? 
"ØáeûêœI!v?Köoì½Ó¯éñOº}úy<6‹u~~ñþÞ÷:û7|ïô»"‚@Ÿùá!a€Ï¶e€3~5²Ì,…hg̘Š"Ãm… Y E©DñbÚožèÿFãCé÷BDî!Ø)ˆ‘’Jþ>éñ öêUƒôIºtûôò6¬exF|ÖåI€ˆ ÐS~~‰á´-¾ø4…­Æ†ˆ‰¯ÐÌ^ÁÿÇNþ¼á%ÕIïû«nˆ±ûSz<ˆ½‡zÕ =þI·O/oÃÚ”àý™ñYÖ3A–?”5xH:&ù–€ôç /©²†žäL¢áÇSûöí©mÛ¶Äÿ–p;vŒ²eËFY²d!þ·ôK²¥³sÂ>éñOº}NøDrx~%{'vÛàߨÚYD;iÚT›@ ­þ•刲ü!ÝÉãeÅŠT¾|y*Z´(mÜøOÖ—××Ô©S©iÓ¦T¶lYbûp@4¤OÒ¥Û k?Ü+9>û/ú¡ "`<€€Ëð!ç2ð0ÍI÷^R1^¢!/_>Ú³gõìÙ“xyƒ—gÜ|óÍ*aÊ”)Ô¤I/ÍAÛÿ¤Û§¡Ëc2YúçyLCaF"‚0‡ÀÿÀ‡œ,K÷^Reé™Df6Sk×® 6Ì€›6mRY, è”… Ý¿ž8ÓÃF¥Ç?éöyè:OšÆóë v×…]CQC"ÂäîMxHÜåívkð¯ÛÄSn"‚,ÀšØ ðŽ?Nœ™À 7޽âjؽ{7õîݛؾX@˜7oeÍš5‚ÒÞß"=xOÈ] ¤OÒ¥Ûç®·Ð8KñÙY¾ÑÖ!Zb.܇ÄÈhþ% ýyÃK*†ªœ P£F µ´/Þ'¡\¹rJLàÛyq¶Áüùó•XÀ™æŠ’6xŒ´¿ÒãA¤ýðË}ÒãŸtûü2Ð`ˆÏ²ÆDYþPÖà!è˜ä[ÒŸ7¼¤ÊzºeqFg"˜bÓ䬸g XÉ`‘"..Neð¿C/*¸M΂ÐíÒÍ¿ºñÖ^éñOº}ÑòÖý~<¿º{0eûá_Yþ…ˆ ËÊ<$b£Ið¯0m¨ "‚ T…ôñ’œ+8C€3øO¨ `ÞÏB‚™¡úoÎ*0E‚Ð'n§zõêJ”àlŃ a­º*}’.Ý>­œmƒ±ºÆgºŽ*@Àu\GŽƒNr²F€tà%ãÅn¼o)(p6•+oÞ¼J003¬Ô2 Ž€ôø'ݾp|ýö{éŸç~ãþ›D„`û½÷€>ä<€žB“Òý—TYãů™DÉe$—¡ Ë+öYãWÿÚGÈÝš¤Ç?éö¹ë-ï[Ãó뽜´þu’nôuCDˆž™ã%ð8ŽØÓà_Oñ_Ö8DYþ€5 à%éñÀK6^´-}’.Ý>/|†6AÀ)ˆÏN‘µV/DkÜ-…‡ÄQ¼¨þ¼á%Ü# =¸GBFKÒãŸtûdxV€€=ŸíáhW-ì"ic=xHl„‰ª@ éÏ^Re adÉò‡ÝÖÀ¿v­>éñOº}±Ñׯ4ž_ý|Åðo4´œ¿"‚óŒ£nÁ‹‡¤x¹jtüÏ”*U*eoÎ×Ó Ï4¤zuª©ÿŸ9s–ÆOy,\J¿úÝø}vªùø#Ôâ™§(mÚ4—õñ™–/ÓÓõjQù‡JEÝÿ»~¦9-¡ÎíZD]V‡^øW.^ÙÁ+òz¶+}¼èIVƒ@Ò¤OÒ¥Û´qåE|ÆûsÐFúk€ˆ€± pœòæ`º£H!:wî-^¶’:tëG gM¥‚ùóQÛνéä©¿Œ‰}sº1÷ ´qó÷4jü4*`ü®_÷—Qܺm‡®É–%jÂ6}KoŒžHÓ'Žˆº¬¼øÓ‹W6J÷^R½I·+}¼È¢k@ 6ÒãŸtûb£¯_i/â3ÞŸõ'°Øìá¨}-¡AÐìL¹*õ¨c›çéŠ+ÒÑàoÑÂÙS)]Ú´ñ}匄!ÆÏ_ïÝ9ÁÏù†çÛt¡uª‚CNêÖwå¾!'­Y»žòܘKÕY²ø=tè÷Ãôrþ´ùÛ­”%KfêÚáEºÿÞ»©fƒætøÈQªX¾4 {½M{oM~÷C:vì8¸% îוnÉ—‡Þ™1—¶ïØEß‚žý¿Q±»n§^]ÛÓ FÅÛwRw£ÝŸ÷쥻ï¸ú÷ìD7äÌNgÏž£~ƒGÑÒå«éú뮡gžþŸ‘Qñ¨«þóâCÎÕjÖ˜tà%UÖ€B&‘,Øm ük7ÑØê“ÿ¤Û}ýJ{ñüâýÙ½qâ…Ýë~-ADè3/’Ð ø÷ùó´ôóUÔ¦S/Z0c¢±´`1¥¿újz©ÕsÓ *×nBÝ_nMÔ¦1Þ¦U_|M3§Ž¦^¯§ó.ЫZÓgq_ÐkƒGÓêOgQh&‹•jÿ45ª_›Z´ëª2š4¬£ì›9çcê=à¿¥ÓÆ ¥â÷Ü•€d¨ˆP­îs´~åÇ”!ýÕ´c×n%N,2²FŒB›¶|O¯viK7ç½)¾|¨ˆpîï¿éĉ“tí5ÙèØñ?i¤QæðÑc4b`O%"ð ¾ý¦*ûî̹´ëç=ôXÅrÔÈhšo |q¹uë7Ñ£¢2Ö1Êö¢{ŠÞ¡~ÇÙ Ò§§W:´8`’¼xÞ¢é^R£¡…{A 6ÒãAl½Ó¯´ôø'Ý>ý<®ŸÅxvïýñYÖóA–?<Ì=#y͘Ÿ7²zviw­ZO57öIxJÜW4Y¡åK=héüwÕïÚý ½ØáU%"ðä~ðˆñÆþ q”9SFz®q=jø¿ 2Ο¿ îYøéçtÝu×*!‚SDؾó§ø=ÞŸµ€~4þ_ôÎÛiñgq4ö~ lºtéÝzOyÊscÉÔñ¿{äáÒÔþÅȳ,˜éJxIÁ¹E&‘PU ÿ 
r†aŠôø'Ý>YÞtÞ/žß¤–3˜=Åû³½>÷¿ööÀ_µADèO/’”‚`ÜêµÔ±{Zþñû”1c†xb¿ÿq„ÊU­K“Ç NQD`ÑàÓyï\&"pVBî\9èª+¯¤¯6l22º÷½K{÷íßXñ£EËhâ´4ÕÈvÈ–5 Íýx ­Xµ6^D`Ñ ï¿;š"BÅr¥UùÙïŽSm5öR˜5=oˆ%®¡–häÈ~úÝ/¿î§téÒ©}ܺ¼ð¯[}Ó±ˆ:zÍ;›¥ïÈ e°Ÿ€ôIºtûì÷ˆì½ˆÏxvïýYöè žu‚çó${œRäϵîLýušztj£Ndرógê3p¤Ú¸pä ^–D®óÖ·P«fèÀo‡Ô†Š gO1ö*øƒ^ú&Ízg,½=c}úÙ*š2v09}ư£‹ÊDxó¾j9CR"B'ãhȇ¯O¯÷ìL”¸‡™ ¿ìÝOo|^}í ã¸Ê3Ô§[:øûT¯i+µ?BâL '‡…rNöG÷º¥û/©²F˜ãåú믧£GÆÁ{ã7Ò+¯¼BÍš5SpNŸ>M¤éÓ§Ó¾}ûˆÿôÓO«{Ò†l†k’¬\¹2µjÕŠªV­5Üï¿ÿžÞ~ûmÕ.pš€ôø'Ý>§ý#­~/â3ÞŸf"K°Ç9œc«UÍá‚ o¶8jÜTµôà÷ÃGèö©Cëç“~ âƉ üÿÐ+tO„ä2XèôêúÙXâ9s&záÙ§¨Á“ÕéøŸ'¨VÃæT¨`~ú*d} IDAThœüðbÇWS~R+òr‡þCÇ"@{Cl8œ¤ˆÀË.6'>ô4ƒ_öP¶ ìÝÅÈzÈ©ö}à W~ñ•‘–š5©oløøÏ^n]^|ȹÕ7ۑÊU^d±ˆ°xñbº÷Þ{fÎÒìÙ³•Hðí·ßR‘"E¨~ýúô矪‰ýÍ7ßL_~ù%õéÓGýnܸ2²B¯M›6)¡áºëþÉÈŠæZ³f uïÞ–/_M1mîõ¿ÚÀñÀPéñOº}¸ÌÓ&½x~ñþìžË½ð¯{½Ó¯%ˆ}†‡D Sl4 þµ¦ UAD°"ªp”@¨ˆ`6tË-·PÿþýéJc9g° ÀK³Ì‹3øç“&MJðsþýO1ú &ˆ à2|ȹ n¥7^x¥_ø“å!éþÀKª¬ñâE&QJ"ÓyüñÇ“gNªý xÿ>†‘÷8à gΜiIDà:y¿ÞÀ÷Bà 9³€÷*à ?Ÿ1zôhµgŸñ×_);xigðr†¤D>A¢@jÙE… T& , –-[ªzXxØ¿¿Z†ÁBEbÄÉá…ìîuKÒíÓÝÿÑÚï§çïÏ—{ßOþvlK¼"‚@¯xýœ?ÁH'M• ¥Ô*¦H‚à9#÷Š]Å#m+TDXûõ7Tüž»)Mš4‘÷ì>¯ýëYÇ…6 A¨c`Vþ(=fLÔ|¤}³òãø2š¿D•Ê—IRDàzš>õdŠm8qЦ½?‹æ¼;^ÙÀ©b÷—¯F fN¤eË×Äï‰`f"Lœ6ÓÈ@8¥ìäkÇ®ÝtâÄIºãöBêïk¯ÉFÇŽÿI#ÇN¡ÃGш=]^ù×õŽjÒ DM%ÄLéãE&˜¶>I—nŸ-NШ¯â3ÞŸ5$0Õ6lC©E?[0®É–•Ö­ßDϼø2å¹1w‚Žuhý<»«uë3˜¾Ûú#åÍ“›Ž=Nê×2–-ÜIϵîBq gÆ—áû ßZ Iá‰Ê©Ê#åSlë”±$aÙŠÕ4jpïø:ÿß3Ô»k{úöûí—‰ýŒ ƒBo¡Æ ê$°›Å‡Á#ÆÓÂO?§ë®»–2¤¿Z ^ˆ^}Èé?Bétà%Õ¿[­Uúx±Ú/”‰¤Ç?éöIô©“6yŸñþì¤WQ·T¤zÆ»8~þÑ{”-kÚõójf,`QÁ¼¾7ö9È{SnðÆ›êG¯vn£ŽßêÒk Ý}ÇmTÙX6ðà#µL„OÔ|Õ¨ÿ<Õ®^%I¡zÕGTöBJmñÒ…·ßŸM³ß§ê»pᕬP“f½3––KÌÓÌL„q“¦ÓyãžNm›«û¿Ûºöî;`d0œ§‰ÓfÐÔqCUÿæ~¼„V¬Z Áƒq&­I¯^:"倗ÔHI¹s2‰ÜáìU+ð¯Wä“nWzü“nŸ,o:oWÏ/ÞŸ÷-·à•Ýé~­@Dè3¯’Ð xÑX®PõÉgT†ÁÿjV¥•k¾¢—Œ½ V/™Ezô§‚ùo¦v-Ÿ¡ŸvÿBÿkü¢±9bãÞÚj?‚» A¡£‘±°~ã·ô´±_BNmRRjk™±ÿï‰0nxzø¡R4~Ê{jãÄ?˜ll”8û2á§Ý{©Y›.ôþäQ”ÙXnÑ¢]7z¤BµÓ÷§Ÿ­¢)cÓ™ÓgTÆg"¼ùF_×G€Wþu½£š4AGÁLÛ ð) |Â×_m{ݺV(=èÊÕªÝÒ'éÒí³Êå¢#€÷çèxY½ñÙ*9gÊADp†kLµzõ„Aî Ýû¡m?î¢Ù¯W™¥î¿‡8#á¥WúÒ_§OÓ]Æ ÷½“&¾=ƒf½=–.]º¤Ngà2¼ŒáŽÛ Rî\7¤("¤ÔŸÎ0ÏÈH›.-mÜü=Ýœ÷&л3å¿9o²§3|8ïóÖ;tÊ8oüá²P_cóijgÎÒ‹_¥í;~R+6ü_ ê?t õéÖžyø¡˜ü…Âzðêy‹”^R#%…û¢!pöìY㤳´mÛ6ud£g~¥NÚ–c‚í´+’º¤ÇƒHúà§{¤Ç?éöùi,Hî ÞŸÝñâ³;œ#m"B¤¤\¼É°YDXùÅ:u„$.p‚€ôç 
/©NxÝzÒ3‰öíÛG•*U¢»ï¾›-ZD9s椡C‡RÕªUióæÍÔ®];zðÁiñâÅ4~üxjÑ¢…ÊD3f ­X±‚¾ûî;:xð U¯^~øaêß¿¿‡Í:Μ9CmÚ´¡ ÿ»bÅŠôÎ;ïÐÕÆñºÙ²e£I“&QË–-U[¥J•¢9sæPÑ¢EðÒ¥KSçÎé‰'ž°î‡KJ÷¯ÃÝW½ôø'Ý>quØ <¿þ~†~€¢¬"B”Àܸ ‚ ã müC"FB4¤n¼ñFjß¾= 8–/_NuêÔ¡;wÒ”€Ð³gO%&lÙ²%ˆÐ¥KÚºu+¥M›– (@*T ¹sçÒØ±ciÖ¬YJd˜6m >œ–.]jñ›F‰/¿ü2Õ«WO‰ÕªU£Ñ£G§÷dRb‹]»v¥C‡QÁ‚•@qÕUWEƒ÷˜€ôIºtû‚6t¤Çg7ý/áܤ̶ "ÓïÚôš<ôÇaãDˆÛµ±9œ¡ø GÈÝßK÷^RÝáZ“>^XDÈ›7/;vŒ2f̨ºÃûZµjQ±bŨ|ùòôǨ¥6lH "¬ZµŠf̘¡Ê<ðÀÔ©S'ªQ£ýôÓOJPøù矣rOïc“%KÚ³g5lØž~úijÞ¼¹X´03>ýôSêÓ§­^½Ze(ðÿgÎüïôžp¬ñ{ÿ¤Û´$=>»é?¾?»Ém…'!<#ܶÀ‡œ­8c®Lº?𒳋m­@z¦‹,ð7ÿæÅY×_==þøãÔ AúþûïÕ¯‹¼”³ø*S¦ 0@e.°XP®\9%"p½Íš5Se9[ ÞœÑ~üñGÕ_¼çBŽ9”Ѹqczê©§¨nݺ¶úÃîʤû×îþJ¯Ozü“nŸtÿÚmž_»‰Êªþ•刲ü¡¬ÁC"Ð)6šÿÚÓ†ª "ØUˆ!`f"?~œ2dÈ ìªY³¦î»ï>5‘ÿöÛo-‹,ð5räHã(ß+éÙgŸU3š"/›¸öÚkãypD•*U¨cÇŽ´wï^µÌAò%=Hfç„mÒ'éÒísÂ'¨¼"€øìù¤Û…ˆ ËÊ<$“|K@úó†—Tß=G:fî‰À“vÎ$X¹r¥Ú$ñ‡~PY±Š¼¿Âí·ßN½{÷¦íÛ·Çï±Ðºukµœ!±ˆ0yòdêÖ­/^\mÆ(ý’¤ó³Û>éñOº}vûõ€—Ÿ½¤yÛdù"‚@À$þ¡„—TYãOz&‹œqP¿~}zÿý÷Õ¾ƒV{ð‰ ±Š7nTû œ:uJ ¼ÜaÈ!ôå—_ª!‹¿ýöåÊ•‹XLhÒ¤‰,g&atÿŠh³ÒãŸtûlv‡øêðüŠwQL¿1á³½0DÛ‘Æ^!’ØJ®þ•刲ü!Ýéã…E„’%Kª¥.ø„†Ý»w'Xæ Á6Ø Ÿ€ôIºtûä{Ø^ ¥Çg{{‹Ú@À[¼åÖHr²œ.ÝxIÅx‰†€$áï¿ÿV5~üñÇêd\ -éñOº}ÑòÖý~éŸçºó…ý J"ƸLr.Óœtà%UÖx‘žItúôiZºt©:ÖÑë‹÷e`áwÞQKt¸¤ûW†vÚ(=þI·ÏN_èPž_¼dÝFø×:;'JBDp‚jŒuâ!‰ ðâð¯,ADåX^¼dãEÛÒ'éÒíóÂghœ"€øìYkõBD°ÆÍÑRxHÅ‹ÊA éÏ^R1`AÀ=Òã{$d´$=þI·O†aØCñÙŽvÕÁ.’6Öƒ‡ÄF˜¨ Âþ¼á%UÖF&‘,Øm ük7ÑØê“ÿ¤Û}ýJãùÕÏgÑX ÿFCËù{!"8Ï8êðDL«ð¯,wADåéÖH/ÒùÁ>ˆ†€ôIºtû¢aí‡{ŸýàEôAtñìô |ÈÉr¥tà%ãEXîÿ¤Ûçž§d´$ýó\%Xö€ˆ`GÔÀ‡\Ĩ\¹Qº?ð’êÊ0ˆ¸dEŒJËá_Yn“ÿ¤Û'Ë›Î[ƒç×yÆ^¶ÿzIÿò¶!"Èò‡²‰@§Øhük#Lª‚ˆ`DT>! 
=øsÄÝ>I—n_Ä q#h@ñY–“ "Èò‡²‰@§À$ßþ¼á%Õ·CH@z<ˆÌQ“¤Ç?éö9êT.@|vx˜æ "ÈòDþ€Iþ& ýC /©²Æ2‰dùÃnkà_»‰ÆVŸôø'ݾØèëWϯ~>‹Æbø7ZÎß ÁyÆQ·úð¿g/\¥ê¨]¥ Õª\Zý?÷‡¨ ØN"‚íH}]¡ôñâkøè\àHŸ¤K·/hñ9hG½$ÁKúh@ÀsÒ_:ð’êùI`€ôñ"‹¬ØHÒ틾~¥Ÿõó,Ö—D}}ËAl ý¥/©68ÙÆ*)œÌ8‡ ª²H@zü“nŸEìÚC|F|Övðjh8D “Aì#Á>–¨ @ì$ }’.Ý>;}º@@ ”DŒ@€ˆh÷£ó ‚ HŸ¤K·O°ka€€æ "hî@˜ ˆ±ñCipŠ€ôIºtûœò êˆ &!ÐîGçA>I—nŸ`×Â4Í @DÐÜ0@ 6bã‡Ò  àé“téö9åÔ  0@M"B Ý΃& }’.Ý>Á®…i š€ˆ ¹a>€@l "ÄÆ¥A@À)Ò'éÒísÊ/¨@ "` €šD„@»L@ú$]º}‚] Ó@4'AsÂ|Ø@DˆJƒ€€S¤OÒ¥Ûç”_P/€@DÀ4ˆv?: ˜€ôIºtû»¦hN"‚æ„ù ±€ˆ?”§HŸ¤K·Ï)¿ ^€ˆ€1 hí~t@@0é“téö v-LМDÍóAb#!6~(  N>I—nŸS~A½  c@ Ð "Úýè<€€`Ò'éÒíìZ˜  9ˆš;æƒÄF"BlüP@œ" }’.Ý>§ü‚zA@"Æ€@  @D´ûÑyÁ¤OÒ¥Û'ص0 @@s4w ̈D„Øø¡4€8E@ú$]º}Nùõ‚€DŒ@€ˆh÷£ó ‚ HŸ¤K·O°ka€€æ "hî@˜ ˆ±ñCipŠ€ôIºtûœò êˆ &!ÐîGçA>I—nŸ`×Â4Í @DÐÜ0@ 6bã‡Ò  àé“téö9åÔ  0@M"B Ý΃& }’.Ý>Á®…i š€ˆ ¹a>€@l "ÄÆ¥A@À)Ò'éÒísÊ/¨@ "` €šD„@»L@ú$]º}‚] Ó@4'AsÂ|Ø@DˆJƒ€€S¤OÒ¥Ûç”_P/€@DÀ4ˆv?: ˜€ôIºtû»¦hN"‚æ„ù ±€ˆ?”§HŸ¤K·Ï)¿ ^€ˆ€1à Ó§OÓøñãiÈ!´oß>Ê—/µmÛ–Z´hAW]u•'6¡Ñ`€ˆL¿£×É@|ÆèB@ú$]º}Rü;ì#€ølKÔˆ±ñCé( ?~œFMǧ?þøã²ÒÙ³g§—^z‰Z¶lI™2eвvÜÑ€ˆ=3”ð'ÄgúUç^IŸ¤K·OgßÃö„Ÿ1"¤€ˆ Í#>µçÈ‘#4tèP3f q weË–Z·nMíÛ·§¬Y³†»¿Ë "XF‡‚>!€øìGú°Ò'éÒíóá\—Ÿçrm: AWéièhàÀ4aÂú믿¢îDƌՇŽ;RŽ9¢. ŽD„p„ð{¿@|ö«gýÓ/é“téöùg$¯'ˆÏÁó¹n=†ˆ ›Ç4±÷矦×_¦M›Fç΋ÙjÞ'¡iÓ¦Ô¥KÊ“'OÌõ¡0 @DÀXÄç y\ßþJŸ¤K·O_Ï×rÄçàú^·žCDÐÍcÂíݺu+½öÚk4sæLºpá‚íÖ¦M›–6lHÝ»w§ Ø^?* ˆÁóyP{ŒøTÏëÛoé“téöéëùàYŽø<ŸëÞcˆº{Pˆý7n¤Þ½{Ó‚ èÒ¥KŽ[•:ujª]»6õèуî¼óNÇÛCþ%Á¿¾EÏþ!€øŒ‘ +é“téöéê÷ Ùø$oû«¯üåO×{³bÅ êß¿?-]ºÔõ¶Í«V­J½zõ¢ûî»Ï3а¾ "èë;Xž2ÄgŒÝ HŸ¤K·OwÿûÙ~Äg?{7}ƒˆ ?ÛÞËS§NQ•*UhåÊ•–ëæSÚ¶mKåÊ•#¦#FŒ cÇŽY®íùä“O,—GÁ`€ˆL¿û¹×ˆÏ~ön°ú&}’.ݾ`=z‹ø¬‡Ÿ`exÂ3ÂIàx“C+W¾|ù¨qãÆÔ®]»Ç7²€0|øð˜Ä„qãÆQóæÍ­˜…2%! 
Ž÷q·Ÿ}ìÜ€uMú$]º}.ZtñY 7ÁÈ@Dˆn¹œ@ñâÅiýúõQ¡añ gÏžÔ¤I“˱˜0oÞ<µÇÂîÝ»£j£T©RôÅ_DU7›D„`ûß½G|ö£WƒÙ'é“téösÔÈî5â³lÿÀºÈ @Dˆœîü—À™3g(C† tñâň˜„ŠÇ§,Y²„-·gÏÊ›7/M:5*1Oo8}ú4ñ߸@ "¡„{t!€ø¬‹§`g$¤OÒ¥Û cÜãÄg÷X£%ç @Dpž±ïZàý Ê—/¶_‰Å^¾À¢g°@`^¦``þŸ…Þ/3xÃÄhÅ„5kÖÐ<Ö>ÜL"ÆŸ >ûÉ›è‹ôIºtû0‚d@|–åXˆ±ñ di>¡[·nÉö^¶À¢_qqqT£FøMùç,ðþ¦¨À‚‹ü;þ9ÿž/®‹ÿÏ{(ðÅ÷·oß>Å L;v ¤oÐéè @DˆžJÈ%€ø,×7°,zÒ'éÒ틞8J8IñÙIº¨ÛmÜ&îƒöøHÅ… &ÛXDàkÚ´iIîÀâ@R'1$÷sL!E’»X°˜;w®H£ n€ˆàe´áÄg·H£7HŸ¤K·Ï ¡È >GÎ wÊ'A¾ÄY˜9sf:qâD²vqºVÙ²eÕïyÙÿ?Ö«hÑ¢´qãFUÍüùóUfCr×µ×^KüñG¬M¢|@@Dˆ£ÒMÄç€8: Ý”>I—n_@†‰6ÝD|ÖÆU04"€„[þ#°uëVºýöÛSDréÒ¥¿¯Y³¦:m!¹‹—2¤t C¹råhùòåñÅ9ƒ![¶l)Ú°sçNÊŸ??\a @D‹7hBñYGÁ̈ HŸ¤K·/bиÑqˆÏŽ#F.€ˆà2pÝ››8q"=ÿüóÉv#ñ„Ÿo1bDüþ¡ù^^öÀs¶é˜TÖ/]hÛ¶m‚6‹+F›6mJÖ^FѨQ#ÝqÃ~@Dp2šp…â³+˜Ñˆ‹¤OÒ¥Û碫ÐTˆÏ"~#Áou¸?M›6U›&w…î‡À÷$µô€÷=6lX’{%pÆ·‘x¿„Ð=¸Þpû"4oÞœÆç0ª_¶lU¬XÑ•¶Ðˆý "ØÏ‡* sIDAT5þG`éÒ¥T©R%W >_ŽñÙ•¡çX#Ò'éÒísÌ1>©ñùrGºùþì“aØn@D¬ë­u¼P¡Bôã?&[˜E€êÕ««ßóѼ—A¨ ÀKxÓCþyrgðËph¦ œ¥p÷Ýw«bÉmØhÖyçwÒ–-[¬u2ÂR| D=(}úôtäÈ‘Ká6i "Hóˆ¿ìáØuöìYêׯuèÐÁÑÎ!>ÿ‡ñÙÑ¡æZåÒ'éÒísÍQš6„ø|¹ãÜxÖt¸ÀìD "`HDLàðáÃtÝu×¥x? Y²dQ÷ðòó¨Fþ?kÞÛ€„ãÇÇßZ! yóæUÂ/YÝ+!4Ë~óÍ7'kKêÔ©U™2eЏ‘ÞÈ/§¯¾ú*¥I“†R¥JEo¾ù&=ýôÓ‘Ç}Â@D柙ÃYTmÚ´¡ .ïÓ·o_GÄÄç⳿ é“téöùk4ØßÄçË™:ùþl¿Q£— "xI_³¶,XŸe”éœeðóÏ?Çÿ*±ˆ`f)˜Y,*p¶‹¡Y 扗B$^*Á"BJ2.^¼˜}ôQ[(ÿý÷ßêXIS<8uꔪ7{öìtðàA[Ú@%Þ€ˆà ÷ µzýõ×ÇŸÙK¦˜ÀâBºtélAøŒølË@V‰ôIºtû„¹S¤9ˆÏ—»ÅÎ÷g‘N‡Q¶€ˆ` Æ`TÒ¹sg4hP²mÒ¤ M™2%YÁÜ× ôØGS]ž`nÎNDwêOøYȈõJ,†ÄZʃ€€I ±8j• â³Ur( I@|ÆÈä @DÀ舘@™2ehõêÕÉÞŸø…äDxÏÎDظq#™G<òò^‚`Ö“xòžxsÅäN}0 äÍyÓ;.ÎDàÍ Y˜H›6-!Áª2ê@&‚ ?øÙŠÐoº2dÈ –6ð²ÎD¸âŠ+lé:â3â³-IX%Ò¿é—nŸ0wŠ4ñùr·Øùþ,Òé0ÊlÁèÿJΟ?OW_}5ñßÉ]¼ñaÙ²e‰3 X H|#ï…À¢_ü{_¼ÿ‡ñÙÖ¡åyeÒ'éÒíóÜ @|NÚAN½? 
0/J¢ÔÛ×®]K¥J•J±û¼Î—¯”ö*`‘€÷@wñÆ‹I‰ \Ž~ôèQUol˜ÒÅ¢EJ'A„³#¥ßóË*ŸÎ1cFâMÍpéI"‚ž~ÓÅjŽWgΜQ§3Ø-˜ Ÿ/ ˆÏºŒçzøœ<['ߟó(jv“D7ikÜÖСCþ›"BJg•Gº¾,¥¥ æž Œ“3g<„bæ,–-[:JÞÍs†íH@+‡ˆPÇ»ÔíO?ý”yäG[C|N/â³£CÏñÊ¥OÒ¥Û縃4oñyS²tãýYóáxó!"~D Aƒôþûï§x³¹—Aè&‰‰ °êËÙæ1ÉU˜R6Cr{&$U×3ÏI—n“÷Ÿ½÷,p†Dg¸ú®Ö5jo”î2÷EX8à=Ì«]»vÄ’ZÖÀG=²HÀÌ+´<ÿ›‘Œ‹‹#¶)Ü!!ü"Æ€îŸu÷ ìOŽ€ôIºtû0²¼'€øì½`3 "8ÃÕwµ¾ûî»mÈ“|^^À›ÕpÆŸhÀÿçM“ºxi‚y±‘Ô5oÞ<µa#/…È–-›¸\¨0‘ðE‹Ñc=æ; Cö€ˆ`KÔä Ägo¸£Uç HŸ¤K·Ïy¡…pŸÃÂïu%AWÏy`÷ã?NŸ|òIØ–y#CB—,Ô¬Y“X ˆö Ýÿ€Ëò†‹ü³”öA0Û¨U«Íž=;Ú&qÀ@D˜Ã}Ú]ÄgŸ:6àÝ’>I—n_À‡˜î#>‹q ±‘Daú½ª“'ORñâÅiÛ¶ma»Ú¤Iš2eJü}œ5ÀK"É0 qVïkùÈ?TŒ¸óÎ;‰w,OŸ>}X[qC° @D¶ÿýÒ{Äg¿xý% }’.Ý>Œ&ŸeøVØK"‚½<}_Û®]»Ô‘‰Ã]戡÷q6gp¦ïkøâºyég$Þó wïÞjIC¸‹3 ¶lÙByòä w+~ƒÀ/ŸýâIôÃ$ }’.Ý>Œ$9Ÿåø–ØC"‚=U /ixâ‰'È<Ò1¥Î›-&uOªT©.ûqr÷󦎑l¤˜:ujZ²d U¬X1P>Ag­€ˆ`JÊ#€ø,Ï'°È:é“téöY'’N@|v‚*êôŠD¯ÈkÞnŸ>}¨gÏža{an´˜ø$†Í›7«Œ†ÄWR"ßéFФN:…µ 7€€I"Æ‚ß >ûÍ£ÁíôIºtû‚;räöñY®o`Yt "DÇ wÿK€³8ÁêF‹¼¬—'„nÈBAãÆ‰÷S0/l¤ˆ!ç4ˆNFýn@|v›8ÚsŠ€ôIºtûœò êµNñÙ:;””E"‚,heM,ÅDÚQl¤))Üg•D«äPN2ÄgÉÞm‘>I—n_¤œqŸ»ŸÝåÖœ!Á®©5šbœ‚‚"Œz!"ÃÏAì%âs½î¯>KŸ¤K·Ï_£Á_½A|ö—?ƒØˆAôºÍ}Žf£››&l¤h7ÑàÕ!x>RŸƒämÿõUú$]º}þþê⳿ü´Þ@DšÇêo¤Ç/ÚÝü€¨sçÎvW‹úD"B€œЮ">Ôñ>è¶ôIºtû|0|ßÄg߻ط„ˆà[׺۱h6Š±Ë²ZµjÑìÙ³íªõ”D„€:>@ÝF|³}ÖUé“téöùl8ø²;ˆÏ¾tk :!nv§“¼Q ÛÈ뼜¾ .L6l ôéÓ;Ýê÷9ˆ>w0º§ >c èH@ú$]º}:ú<ˆ6#>Ñëú÷"‚þ>ÕƒmÛ¶QñâÅÕ «So¤ÈBþüùjõˆD„9;à]E|øÐ°ûÒ'éÒíÓÐå5ñ9°®×¶ã´u\ÃÜ()Êõ»®–ADÐÕs°Û Äg+ÔPÆ+Ò'éÒíóÊoh×ÄgkÜPʼáîûV—,YB]»v¥o¾ùƶ¾,X^{í5zòÉ'm«DŒ @|šÇõí¯ôIºtûôõ|p-G|®ïuë9DÝ<{Al%ÁVœ¨ @l# }’.Ý>ÛŠ@@ ˆ &!ÐîGçA>I—nŸ`×Â4Í @DÐÜ0@ 6bã‡Ò  àé“téö9åÔ  0@M"B Ý΃& }’.Ý>Á®…i š€ˆ ¹a>€@l "ÄÆ¥A@À)Ò'éÒísÊ/¨@ "` €šD„@»L@ú$]º}‚] Ó@4'AsÂ|Ø@DˆJƒ€€S¤OÒ¥Ûç”_P/€@DÀ4ˆv?: ˜€ôIºtû»¦hN"‚æ„ù ±€ˆ?”§HŸ¤K·Ï)¿ ^€ˆ1ðä“OÒœ9s(uêÔ zGÇKç»!"èì½(lç XºtijÛ¶mŠ¥š6mJ  nݺÅß7lØ0%"Lž<™î¾ûnÚ²e 5nܘZµjE-[¶T"ÂêÕ«éÃ?TevîÜImÚ´¡«¯¾šfÏž±•±ˆkÖ¬¡îÝ»ÓòåË#nïìÙ³tå•W&¸ÿüùóG5jÔ õë×S¡B…"®7êI"‚ž~ó“Ոϗ{ñÙO#Üz_¤OÒ¥Ûg] ,˜•]z5jÔˆ6nÜHÛ¶mS÷@DðÿÁÿ>–ÞCÄgÄgécÔ+û¤OÒ¥Ûç•ßüÔ.â3ⳟƳ}ˆ`'MÁuY ‚Ë–-£:ÐæÍ›“í]âLóÆ *гÏ>«–C„^õë×§n¸ @|ðÊZ`€m4—3$'"°0ÀÙÜæG}¤–QìÝ»—B3>¬²)ø÷¥J•¢.]ºÐöíÛiÁ‚ª\ïÞ½iîܹJ\à¥I]lߊ+ "Óv™Á.’¨Ç*ÄgÄg«cÇïå¤OÒ¥Ûç÷ñáFÿŸŸÝg:¶AG¯Y°9¹5]œapï½÷Æ×˜x9Äÿ·wÿ >wqÀ¿IJd ”²XüI’Á$™ 6Q )ÝÁdPŒ"ÿJÉ##eA= 
eÐS¥'I)ÅÀÀ`àé|ËÍ•8çþ9Ïç~î뎜ßïûù¼ÎéÔyßï÷{¯^I¸ÿ~sˆP‚íÛ·!Ä÷Ÿ,]ºtxÿþý°|ùòñŸoݺ5>>pàÀ?†§OŸïŠ(Ï\mذaò{ Ê·oß|ïž(w@ܹsç·ŠB„i,²yú!Â<¸DeÛŸíω–ó¬¶ý½¾YŒúeögûó]úl[ˆðG¢¦›¤–ÇÊ ¾áÓ§OÃÍ›7‡ƒ/^œòN„ïb{öìÿÿÇ;Ê~óæÍCy¤áçŸß=ÎP^ÐX®ùñãÇñ΂l¬X±b8qâÄpôèÑ)w"”; Êc kÖ¬™r‰*”;^¼x1\¸pAˆciϸ !ÂŒ }Á ìÏÃúÚŸg¸~<ú!=z} —D÷–ìÏöçî‹nž\Pˆ0O&j¦eNw|÷îÝøN„'OžLy·AùM~y´ üe‡_=ÎPûå·ÿå®߉ðåË—aÙ²ecPîH(?å½å=‡žr'ÂçÏŸ‡òXëW¯†õë×1”ÇÊ»Ê;¾¿±¼¿àåË—“/V¼téÒðôéÓáÊ•+ã÷—Ï•úwîÜ9ÖZ^üxþüy!ÂLU’Ï ’LäªðsˆP‚…ò%øÕ_g(/?ܸqãPM(v²ÜIP^v¸oß¾ÉaÕªUÃåË—Ç?³xòäÉáìÙ³cP>»iÓ¦áÔ©Sã»Ê Ÿ={6>²PîJxüøñøïå1Š»wïÛ¶m¯Swxøð¡a~.ß9­Zˆ0§¼¾¼BÀþl®X& rHôCzôú䢙å¦íÏöçY^Ri¾Nˆf*ßHÙËÝ‹/ž2ðСCS~+ÿ«?ñøíÛ·qLù  Ö®];Þ5P„E‹óãÇK–,¾~ý:¬^½zØ»wïpîܹñŽŸÞ¾};~þÑ£GãØòèÁîÝ»§üu†7nŒÁA víÚ5ÞIðúõëáùóçC©±Ü‘°råÊabbb8räÈø× vìØ1lÙ²e .îÝ»7>†Q‰(\»vm¼ƒÁ dÁ7´)DhÀ2tNìÏöç9YX ¾4ú!=z} –ÀÿÞ‚ýÙþü¿/ ‚NŒ²è# Dèãì*hˆ~H^_«·ñ¨"ÔJG€@J!BÊiÕ ¢Ò£×—` h B„ £,úú8» Z¢Ò£××êm<j„µRÆ R@ˆrZ5E€@è‡ôèõ%XZ @ ¨€!èÄ(‹>B„>ήB€Vè‡ôèõµzO€Z!B­”q¤"¤œVM @ ú!=z} –€* D:1Ê"@ €¡³« @ U ú!=z}­ÞÆ @ V@ˆP+e)„)§US$ˆ~H^_‚% ‚ ‚NŒ²è# Dèãì*hˆ~H^_«·ñ¨"ÔJG€@J!BÊiÕ ¢Ò£×—` h B„ £,úú8» Z¢Ò£××êm<j„µRÆ R@ˆrZ5E€@è‡ôèõ%XZ @ ¨€!èÄ(‹>B„>ήB€Vè‡ôèõµzO€Z!B­”q¤"¤œVM @ ú!=z} –€* D:1Ê"@ €¡³« @ U ú!=z}­ÞÆ @ V@ˆP+e)„)§US$ˆ~H^_‚% ‚ ‚NŒ²è# Dèãì*hˆ~H^_«·ñ¨"ÔJG€@J!BÊiÕ ¢Ò£×—` h B„ £,úú8» Z¢Ò£××êm<j„µRÆ R@ˆrZ5E€@è‡ôèõ%XZ @ ¨€!èÄ(‹>B„>ήB€Vè‡ôèõµzO€Z!B­”q¤"¤œVM @ ú!=z} –€* D:1Ê"@ €¡³« @ U ú!=z}­ÞÆ @ V@ˆP+e)„)§US$ˆ~H^_‚% ‚ ‚NŒ²è# Dèãì*hˆ~H^_«·ñ¨"ÔJG€@J!BÊiÕ ¢Ò£×—` h B„ £,úú8» Z¢Ò£××êm<j„µRÆ R@ˆrZ5E€@è‡ôèõ%XZ @ ¨€!èÄ(‹>B„>ήB€Vè‡ôèõµzO€Z!B­”q¤"¤œVM @ ú!=z} –€* D:1Ê"@ €¡³« @ U ú!=z}­ÞÆ @ V@ˆP+e)„)§US$ˆ~H^_‚% ‚ ‚NŒ²è# Dèãì*hˆ~H^_«·ñ¨"ÔJG€@J!BÊiÕ ¢Ò£×—` h B„ £,úú8» Z¢Ò£××êm<j&C„ÚG€lŸÛRÙ¤ý @`¡ üûÏßa[·?‡… ÐAà?QvÑ—»1¤ÈIEND®B`‚patroni-3.2.2/docs/citus.rst000066400000000000000000000404211455170150700160110ustar00rootroot00000000000000.. _citus: Citus support ============= Patroni makes it extremely simple to deploy `Multi-Node Citus`__ clusters. __ https://docs.citusdata.com/en/stable/installation/multi_node.html TL;DR ----- There are only a few simple rules you need to follow: 1. 
`Citus `__ database extension to PostgreSQL must be available on all nodes. Absolute minimum supported Citus version is 10.0, but, to take all benefits from transparent switchovers and restarts of workers we recommend using at least Citus 11.2. 2. Cluster name (``scope``) must be the same for all Citus nodes! 3. Superuser credentials must be the same on coordinator and all worker nodes, and ``pg_hba.conf`` should allow superuser access between all nodes. 4. :ref:`REST API ` access should be allowed from worker nodes to the coordinator. E.g., credentials should be the same and if configured, client certificates from worker nodes must be accepted by the coordinator. 5. Add the following section to the ``patroni.yaml``: .. code:: YAML citus: group: X # 0 for coordinator and 1, 2, 3, etc for workers database: citus # must be the same on all nodes After that you just need to start Patroni and it will handle the rest: 1. ``citus`` extension will be automatically added to ``shared_preload_libraries``. 2. If ``max_prepared_transactions`` isn't explicitly set in the global :ref:`dynamic configuration ` Patroni will automatically set it to ``2*max_connections``. 3. The ``citus.local_hostname`` GUC value will be adjusted from ``localhost`` to the value that Patroni is using in order to connect to the local PostgreSQL instance. The value sometimes should be different from the ``localhost`` because PostgreSQL might be not listening on it. 4. The ``citus.database`` will be automatically created followed by ``CREATE EXTENSION citus``. 5. Current superuser :ref:`credentials ` will be added to the ``pg_dist_authinfo`` table to allow cross-node communication. Don't forget to update them if later you decide to change superuser username/password/sslcert/sslkey! 6. The coordinator primary node will automatically discover worker primary nodes and add them to the ``pg_dist_node`` table using the ``citus_add_node()`` function. 7. 
Patroni will also maintain ``pg_dist_node`` in case failover/switchover on the coordinator or worker clusters occurs. patronictl ---------- Coordinator and worker clusters are physically different PostgreSQL/Patroni clusters that are just logically groupped together using the `Citus `__ database extension to PostgreSQL. Therefore in most cases it is not possible to manage them as a single entity. It results in two major differences in :ref:`patronictl` behaviour when ``patroni.yaml`` has the ``citus`` section comparing with the usual: 1. The ``list`` and the ``topology`` by default output all members of the Citus formation (coordinators and workers). The new column ``Group`` indicates which Citus group they belong to. 2. For all ``patronictl`` commands the new option is introduced, named ``--group``. For some commands the default value for the group might be taken from the ``patroni.yaml``. For example, :ref:`patronictl_pause` will enable the maintenance mode by default for the ``group`` that is set in the ``citus`` section, but for example for :ref:`patronictl_switchover` or :ref:`patronictl_remove` the group must be explicitly specified. 
An example of :ref:`patronictl_list` output for the Citus cluster:: postgres@coord1:~$ patronictl list demo + Citus cluster: demo ----------+--------------+---------+----+-----------+ | Group | Member | Host | Role | State | TL | Lag in MB | +-------+---------+-------------+--------------+---------+----+-----------+ | 0 | coord1 | 172.27.0.10 | Replica | running | 1 | 0 | | 0 | coord2 | 172.27.0.6 | Sync Standby | running | 1 | 0 | | 0 | coord3 | 172.27.0.4 | Leader | running | 1 | | | 1 | work1-1 | 172.27.0.8 | Sync Standby | running | 1 | 0 | | 1 | work1-2 | 172.27.0.2 | Leader | running | 1 | | | 2 | work2-1 | 172.27.0.5 | Sync Standby | running | 1 | 0 | | 2 | work2-2 | 172.27.0.7 | Leader | running | 1 | | +-------+---------+-------------+--------------+---------+----+-----------+ If we add the ``--group`` option, the output will change to:: postgres@coord1:~$ patronictl list demo --group 0 + Citus cluster: demo (group: 0, 7179854923829112860) -----------+ | Member | Host | Role | State | TL | Lag in MB | +--------+-------------+--------------+---------+----+-----------+ | coord1 | 172.27.0.10 | Replica | running | 1 | 0 | | coord2 | 172.27.0.6 | Sync Standby | running | 1 | 0 | | coord3 | 172.27.0.4 | Leader | running | 1 | | +--------+-------------+--------------+---------+----+-----------+ postgres@coord1:~$ patronictl list demo --group 1 + Citus cluster: demo (group: 1, 7179854923881963547) -----------+ | Member | Host | Role | State | TL | Lag in MB | +---------+------------+--------------+---------+----+-----------+ | work1-1 | 172.27.0.8 | Sync Standby | running | 1 | 0 | | work1-2 | 172.27.0.2 | Leader | running | 1 | | +---------+------------+--------------+---------+----+-----------+ Citus worker switchover ----------------------- When a switchover is orchestrated for a Citus worker node, Citus offers the opportunity to make the switchover close to transparent for an application. 
Because the application connects to the coordinator, which in turn connects to the worker nodes, then it is possible with Citus to `pause` the SQL traffic on the coordinator for the shards hosted on a worker node. The switchover then happens while the traffic is kept on the coordinator, and resumes as soon as a new primary worker node is ready to accept read-write queries. An example of :ref:`patronictl_switchover` on the worker cluster:: postgres@coord1:~$ patronictl switchover demo + Citus cluster: demo ----------+--------------+---------+----+-----------+ | Group | Member | Host | Role | State | TL | Lag in MB | +-------+---------+-------------+--------------+---------+----+-----------+ | 0 | coord1 | 172.27.0.10 | Replica | running | 1 | 0 | | 0 | coord2 | 172.27.0.6 | Sync Standby | running | 1 | 0 | | 0 | coord3 | 172.27.0.4 | Leader | running | 1 | | | 1 | work1-1 | 172.27.0.8 | Leader | running | 1 | | | 1 | work1-2 | 172.27.0.2 | Sync Standby | running | 1 | 0 | | 2 | work2-1 | 172.27.0.5 | Sync Standby | running | 1 | 0 | | 2 | work2-2 | 172.27.0.7 | Leader | running | 1 | | +-------+---------+-------------+--------------+---------+----+-----------+ Citus group: 2 Primary [work2-2]: Candidate ['work2-1'] []: When should the switchover take place (e.g. 2022-12-22T08:02 ) [now]: Current cluster topology + Citus cluster: demo (group: 2, 7179854924063375386) -----------+ | Member | Host | Role | State | TL | Lag in MB | +---------+------------+--------------+---------+----+-----------+ | work2-1 | 172.27.0.5 | Sync Standby | running | 1 | 0 | | work2-2 | 172.27.0.7 | Leader | running | 1 | | +---------+------------+--------------+---------+----+-----------+ Are you sure you want to switchover cluster demo, demoting current primary work2-2? 
[y/N]: y 2022-12-22 07:02:40.33003 Successfully switched over to "work2-1" + Citus cluster: demo (group: 2, 7179854924063375386) ------+ | Member | Host | Role | State | TL | Lag in MB | +---------+------------+---------+---------+----+-----------+ | work2-1 | 172.27.0.5 | Leader | running | 1 | | | work2-2 | 172.27.0.7 | Replica | stopped | | unknown | +---------+------------+---------+---------+----+-----------+ postgres@coord1:~$ patronictl list demo + Citus cluster: demo ----------+--------------+---------+----+-----------+ | Group | Member | Host | Role | State | TL | Lag in MB | +-------+---------+-------------+--------------+---------+----+-----------+ | 0 | coord1 | 172.27.0.10 | Replica | running | 1 | 0 | | 0 | coord2 | 172.27.0.6 | Sync Standby | running | 1 | 0 | | 0 | coord3 | 172.27.0.4 | Leader | running | 1 | | | 1 | work1-1 | 172.27.0.8 | Leader | running | 1 | | | 1 | work1-2 | 172.27.0.2 | Sync Standby | running | 1 | 0 | | 2 | work2-1 | 172.27.0.5 | Leader | running | 2 | | | 2 | work2-2 | 172.27.0.7 | Sync Standby | running | 2 | 0 | +-------+---------+-------------+--------------+---------+----+-----------+ And this is how it looks on the coordinator side:: # The worker primary notifies the coordinator that it is going to execute "pg_ctl stop". 2022-12-22 07:02:38,636 DEBUG: query("BEGIN") 2022-12-22 07:02:38,636 DEBUG: query("SELECT pg_catalog.citus_update_node(3, '172.27.0.7-demoted', 5432, true, 10000)") # From this moment all application traffic on the coordinator to the worker group 2 is paused. # The future worker primary notifies the coordinator that it acquired the leader lock in DCS and about to run "pg_ctl promote". 2022-12-22 07:02:40,085 DEBUG: query("SELECT pg_catalog.citus_update_node(3, '172.27.0.5', 5432)") # The new worker primary just finished promote and notifies coordinator that it is ready to accept read-write traffic. 
2022-12-22 07:02:41,485 DEBUG: query("COMMIT") # From this moment the application traffic on the coordinator to the worker group 2 is unblocked. Peek into DCS ------------- The Citus cluster (coordinator and workers) are stored in DCS as a fleet of Patroni clusters logically grouped together:: /service/batman/ # scope=batman /service/batman/0/ # citus.group=0, coordinator /service/batman/0/initialize /service/batman/0/leader /service/batman/0/members/ /service/batman/0/members/m1 /service/batman/0/members/m2 /service/batman/1/ # citus.group=1, worker /service/batman/1/initialize /service/batman/1/leader /service/batman/1/members/ /service/batman/1/members/m3 /service/batman/1/members/m4 ... Such an approach was chosen because for most DCS it becomes possible to fetch the entire Citus cluster with a single recursive read request. Only Citus coordinator nodes are reading the whole tree, because they have to discover worker nodes. Worker nodes are reading only the subtree for their own group and in some cases they could read the subtree of the coordinator group. Citus on Kubernetes ------------------- Since Kubernetes doesn't support hierarchical structures we had to include the citus group to all K8s objects Patroni creates:: batman-0-leader # the leader config map for the coordinator batman-0-config # the config map holding initialize, config, and history "keys" ... batman-1-leader # the leader config map for worker group 1 batman-1-config ... I.e., the naming pattern is: ``${scope}-${citus.group}-${type}``. All Kubernetes objects are discovered by Patroni using the `label selector`__, therefore all Pods with Patroni&Citus and Endpoints/ConfigMaps must have similar labels, and Patroni must be configured to use them using Kubernetes :ref:`settings ` or :ref:`environment variables `. __ https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors A couple of examples of Patroni configuration using Pods environment variables: 1. 
for the coordinator cluster .. code:: YAML apiVersion: v1 kind: Pod metadata: labels: application: patroni citus-group: "0" citus-type: coordinator cluster-name: citusdemo name: citusdemo-0-0 namespace: default spec: containers: - env: - name: PATRONI_SCOPE value: citusdemo - name: PATRONI_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: PATRONI_KUBERNETES_POD_IP valueFrom: fieldRef: apiVersion: v1 fieldPath: status.podIP - name: PATRONI_KUBERNETES_NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.namespace - name: PATRONI_KUBERNETES_LABELS value: '{application: patroni}' - name: PATRONI_CITUS_DATABASE value: citus - name: PATRONI_CITUS_GROUP value: "0" 2. for the worker cluster from the group 2 .. code:: YAML apiVersion: v1 kind: Pod metadata: labels: application: patroni citus-group: "2" citus-type: worker cluster-name: citusdemo name: citusdemo-2-0 namespace: default spec: containers: - env: - name: PATRONI_SCOPE value: citusdemo - name: PATRONI_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: PATRONI_KUBERNETES_POD_IP valueFrom: fieldRef: apiVersion: v1 fieldPath: status.podIP - name: PATRONI_KUBERNETES_NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.namespace - name: PATRONI_KUBERNETES_LABELS value: '{application: patroni}' - name: PATRONI_CITUS_DATABASE value: citus - name: PATRONI_CITUS_GROUP value: "2" As you may noticed, both examples have ``citus-group`` label set. This label allows Patroni to identify object as belonging to a certain Citus group. In addition to that, there is also ``PATRONI_CITUS_GROUP`` environment variable, which has the same value as the ``citus-group`` label. When Patroni creates new Kubernetes objects ConfigMaps or Endpoints, it automatically puts the ``citus-group: ${env.PATRONI_CITUS_GROUP}`` label on them: .. 
code:: YAML apiVersion: v1 kind: ConfigMap metadata: name: citusdemo-0-leader # Is generated as ${env.PATRONI_SCOPE}-${env.PATRONI_CITUS_GROUP}-leader labels: application: patroni # Is set from the ${env.PATRONI_KUBERNETES_LABELS} cluster-name: citusdemo # Is automatically set from the ${env.PATRONI_SCOPE} citus-group: '0' # Is automatically set from the ${env.PATRONI_CITUS_GROUP} You can find a complete example of Patroni deployment on Kubernetes with Citus support in the `kubernetes`__ folder of the Patroni repository. __ https://github.com/zalando/patroni/tree/master/kubernetes There are two important files for you: 1. Dockerfile.citus 2. citus_k8s.yaml Citus upgrades and PostgreSQL major upgrades -------------------------------------------- First, please read about upgrading Citus version in the `documentation`__. There is one minor change in the process. When executing upgrade, you have to use :ref:`patronictl_restart` instead of ``systemctl restart`` to restart PostgreSQL. __ https://docs.citusdata.com/en/latest/admin_guide/upgrading_citus.html The PostgreSQL major upgrade with Citus is a bit more complex. You will have to combine techniques used in the Citus documentation about major upgrades and Patroni documentation about :ref:`PostgreSQL major upgrade`. Please keep in mind that Citus cluster consists of many Patroni clusters (coordinator and workers) and they all have to be upgraded independently. patroni-3.2.2/docs/conf.py000066400000000000000000000233371455170150700154360ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Patroni documentation build configuration file, created by # sphinx-quickstart on Mon Dec 19 16:54:09 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. 
# If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('..')) from patroni.version import __version__ project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) module_dir = os.path.abspath(os.path.join(project_root, 'patroni')) excludes = ['tests', 'setup.py', 'conf'] # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', # 'sphinx.ext.viewcode', 'sphinx_github_style', # Generate "View on GitHub" for source code 'sphinxcontrib.apidoc', # For generating module docs from code 'sphinx.ext.autodoc', # For generating module docs from docstrings 'sphinx.ext.napoleon', # For Google and Numpy formatted docstrings ] apidoc_module_dir = module_dir apidoc_output_dir = 'modules' apidoc_excluded_paths = excludes apidoc_separate_modules = True # Include autodoc for all members, including private ones and the ones that are missing a docstring. autodoc_default_options = { "members": True, "undoc-members": True, "private-members": True, } # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. 
project = 'Patroni'
copyright = '2015 Compose, Zalando SE'
author = 'Zalando SE'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version (full version with the last dotted component dropped).
version = __version__[:__version__.rfind('.')]
# The full version, including alpha/beta/rc tags.
release = __version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Read the Docs sets READTHEDOCS=True in the environment and ships the theme
# itself, so the local-theme setup below is only needed outside of RTD.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] # Replace "source" links with "edit on GitHub" when using rtd theme html_context = { 'display_github': True, 'github_user': 'zalando', 'github_repo': 'patroni', 'github_version': 'master', 'conf_py_path': '/docs/', } # sphinx-github-style options, https://sphinx-github-style.readthedocs.io/en/latest/index.html # The name of the top-level package. top_level = "patroni" # The blob to link to on GitHub - any of "head", "last_tag", or "{blob}" # linkcode_blob = 'head' # The link to your GitHub repository formatted as https://github.com/user/repo # If not provided, will attempt to create the link from the html_context dict # linkcode_url = f"https://github.com/{html_context['github_user']}/" \ # f"{html_context['github_repo']}/{html_context['github_version']}" # The text to use for the linkcode link # linkcode_link_text: str = "View on GitHub" # A linkcode_resolve() function to use for resolving the link target # linkcode_resolve: types.FunctionType # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'Patronidoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Patroni.tex', 'Patroni Documentation', 'Zalando SE', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ (master_doc, 'patroni', 'Patroni Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Patroni', 'Patroni Documentation', author, 'Patroni', 'One line description of project.', 'Miscellaneous'), ] # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info. epub_title = project epub_author = author epub_publisher = author epub_copyright = copyright # The unique identifier of the text. This can be a ISBN number # or the project homepage. # # epub_identifier = '' # A unique identification for the text. # # epub_uid = '' # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html'] # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'python': ('https://docs.python.org/', None)} # Remove these pages from index, references, toc trees, etc. # If the builder is not 'html' then add the API docs modules index to pages to be removed. exclude_from_builder = { 'latex': ['modules/modules'], 'epub': ['modules/modules'], } # Internal holding list, anything added here will always be excluded _docs_to_remove = [] def builder_inited(app): """Run during Sphinx `builder-inited` phase. Set a config value to builder name and add module docs to `docs_to_remove`. """ print(f'The builder is: {app.builder.name}') app.add_config_value('builder', app.builder.name, 'env') # Remove pages when builder matches any referenced in exclude_from_builder if exclude_from_builder.get(app.builder.name): _docs_to_remove.extend(exclude_from_builder[app.builder.name]) def env_get_outdated(app, env, added, changed, removed): """Run during Sphinx `env-get-outdated` phase. 
Remove the items listed in `docs_to_remove` from known pages. """ added.difference_update(_docs_to_remove) changed.difference_update(_docs_to_remove) removed.update(_docs_to_remove) return [] def doctree_read(app, doctree): """Run during Sphinx `doctree-read` phase. Remove the items listed in `docs_to_remove` from the table of contents. """ from sphinx import addnodes for toc_tree_node in doctree.traverse(addnodes.toctree): for e in toc_tree_node['entries']: ref = str(e[1]) if ref in _docs_to_remove: toc_tree_node['entries'].remove(e) def autodoc_skip(app, what, name, obj, would_skip, options): """Include autodoc of ``__init__`` methods, which are skipped by default.""" if name == "__init__": return False return would_skip # A possibility to have an own stylesheet, to add new rules or override existing ones # For the latter case, the CSS specificity of the rules should be higher than the default ones def setup(app): if hasattr(app, 'add_css_file'): app.add_css_file('custom.css') else: app.add_stylesheet('custom.css') # Run extra steps to remove module docs when running with a non-html builder app.connect('builder-inited', builder_inited) app.connect('env-get-outdated', env_get_outdated) app.connect('doctree-read', doctree_read) app.connect("autodoc-skip-member", autodoc_skip) patroni-3.2.2/docs/contributing_guidelines.rst000066400000000000000000000141411455170150700216010ustar00rootroot00000000000000.. _contributing_guidelines: Contributing guidelines ======================= .. _chatting: Chatting -------- If you have a question, looking for an interactive troubleshooting help or want to chat with other Patroni users, join us on channel `#patroni `__ in the `PostgreSQL Slack `__. .. _reporting_bugs: Reporting bugs -------------- Before reporting a bug please make sure to **reproduce it with the latest Patroni version**! Also please double check if the issue already exists in our `Issues Tracker `__. 
Running tests ------------- Requirements for running behave tests: #. PostgreSQL packages including `contrib `__ modules need to be installed. #. PostgreSQL binaries must be available in your `PATH`. You may need to add them to the path with something like `PATH=/usr/lib/postgresql/11/bin:$PATH python -m behave`. #. If you'd like to test with external DCSs (e.g., Etcd, Consul, and Zookeeper) you'll need the packages installed and respective services running and accepting unencrypted/unprotected connections on localhost and default port. In the case of Etcd or Consul, the behave test suite could start them up if binaries are available in the `PATH`. Install dependencies: .. code-block:: bash # You may want to use Virtualenv or specify pip3. pip install -r requirements.txt pip install -r requirements.dev.txt After you have all dependencies installed, you can run the various test suites: .. code-block:: bash # You may want to use Virtualenv or specify python3. # Run flake8 to check syntax and formatting: python setup.py flake8 # Run the pytest suite in tests/: python setup.py test # Moreover, you may want to run tests in different scopes for debugging purposes, # the -s option include print output during test execution. # Tests in pytest typically follow the pattern: FILEPATH::CLASSNAME::TESTNAME. pytest -s tests/test_api.py pytest -s tests/test_api.py::TestRestApiHandler pytest -s tests/test_api.py::TestRestApiHandler::test_do_GET # Run the behave (https://behave.readthedocs.io/en/latest/) test suite in features/; # modify DCS as desired (raft has no dependencies so is the easiest to start with): DCS=raft python -m behave Testing with tox ---------------- To run tox tests you only need to install one dependency (other than Python) .. code-block:: bash pip install tox>=4 If you wish to run `behave` tests then you also need docker installed. 
Tox configuration in `tox.ini` has "environments" to run the following tasks: * lint: Python code lint with `flake8` * test: unit tests for all available python interpreters with `pytest`, generates XML reports or HTML reports if a TTY is detected * dep: detect package dependency conflicts using `pipdeptree` * type: static type checking with `pyright` * black: code formatting with `black` * docker-build: build docker image used for the `behave` env * docker-cmd: run arbitrary command with the above image * docker-behave-etcd: run tox for behave tests with above image * py*behave: run behave with available python interpreters (without docker, although this is what is called inside docker containers) * docs: build docs with `sphinx` Running tox ^^^^^^^^^^^ To run the default env list; dep, lint, test, and docs, just run: .. code-block:: bash tox The `test` envs can be run with the label `test`: .. code-block:: bash tox -m test The `behave` docker tests can be run with the label `behave`: .. code-block:: bash tox -m behave Similarly, docs has the label `docs`. All other envs can be run with their respective env names: .. code-block:: bash tox -e lint tox -e py39-test-lin It is also possible to select partial env lists using `factors`. For example, if you want to run all envs for python 3.10: .. code-block:: bash tox -f py310 This is equivalent to running all the envs listed below: .. code-block:: bash $ tox -l -f py310 py310-test-lin py310-test-mac py310-test-win py310-type-lin py310-type-mac py310-type-win py310-behave-etcd-lin py310-behave-etcd-win py310-behave-etcd-mac You can list all configured combinations of environments with tox (>=v4) like so .. code-block:: bash tox l The envs `test` and `docs` will attempt to open the HTML output files when the job completes, if tox is run with an active terminal. This is intended to be for benefit of the developer running this env locally. It will attempt to run `open` on a mac and `xdg-open` on Linux. 
To use a different command set the env var `OPEN_CMD` to the name or path of the command. If this step fails it will not fail the run overall. If you want to disable this facility set the env var `OPEN_CMD` to the `:` no-op command. .. code-block:: bash OPEN_CMD=: tox -m docs Behave tests ^^^^^^^^^^^^ Behave tests with `-m behave` will build docker images based on PG_MAJOR version 11 through 16 and then run all behave tests. This can take quite a long time to run so you might want to limit the scope to a select version of Postgres or to a specific feature set or steps. To specify the version of postgres include the full name of the dependent image build env that you want and then the behave env name. For instance if you want Postgres 14 use: .. code-block:: bash tox -e pg14-docker-build,pg14-docker-behave-etcd-lin If on the other hand you want to test a specific feature you can pass positional arguments to behave. This will run the watchdog behave feature test scenario with all versions of Postgres. .. code-block:: bash tox -m behave -- features/watchdog.feature Of course you can combine the two. Contributing a pull request --------------------------- #. Fork the repository, develop and test your code changes. #. Reflect changes in the user documentation. #. Submit a pull request with a clear description of the changes objective. Link an existing issue if necessary. You'll get feedback about your pull request as soon as possible. Happy Patroni hacking ;-) patroni-3.2.2/docs/dcs_failsafe_mode.rst000066400000000000000000000110561455170150700202730ustar00rootroot00000000000000.. _dcs_failsafe_mode: DCS Failsafe Mode ================= The problem ----------- Patroni is heavily relying on Distributed Configuration Store (DCS) to solve the task of leader elections and detect network partitioning. That is, the node is allowed to run Postgres as the primary only if it can update the leader lock in DCS. 
In case the update of the leader lock fails, Postgres is immediately demoted and started as read-only. Depending on which DCS is used, the chances of hitting the "problem" differ. For example, with Etcd which is only used for Patroni, chances are close to zero, while with K8s API (backed by Etcd) it could be observed more frequently. Reasons for the current implementation --------------------------------------- The leader lock update failure could be caused by two main reasons: 1. Network partitioning 2. DCS being down In general, it is impossible to distinguish between these two from a single node, and therefore Patroni assumes the worst case - network partitioning. In the case of a partitioned network, other nodes of the Patroni cluster may successfully grab the leader lock and promote Postgres to primary. In order to avoid a split-brain, the old primary is demoted before the leader lock expires. DCS Failsafe Mode ----------------- We introduce a new special option, the ``failsafe_mode``. It could be enabled only via global :ref:`dynamic configuration ` stored in the DCS ``/config`` key. If the failsafe mode is enabled and the leader lock update in DCS failed due to reasons different from the version/value/index mismatch, Postgres may continue to run as a primary if it can access all known members of the cluster via Patroni REST API. Low-level implementation details -------------------------------- - We introduce a new, permanent key in DCS, named ``/failsafe``. - The ``/failsafe`` key contains all known members of the given Patroni cluster at a given time. - The current leader maintains the ``/failsafe`` key. - The member is allowed to participate in the leader race and become the new leader only if it is present in the ``/failsafe`` key. - If the cluster consists of a single node the ``/failsafe`` key will contain a single member. 
- In the case of DCS "outage" the existing primary connects to all members presented in the ``/failsafe`` key via the ``POST /failsafe`` REST API and may continue to run as the primary if all replicas acknowledge it. - If one of the members doesn't respond, the primary is demoted. - Replicas are using incoming ``POST /failsafe`` REST API requests as an indicator that the primary is still alive. This information is cached for ``ttl`` seconds. F.A.Q. ------ - Why MUST the current primary see ALL other members? Can’t we rely on quorum here? This is a great question! The problem is that the view on the quorum might be different from the perspective of DCS and Patroni. While DCS nodes must be evenly distributed across availability zones, there is no such rule for Patroni, and more importantly, there is no mechanism for introducing and enforcing such a rule. If the majority of Patroni nodes ends up in the losing part of the partitioned network (including primary) while minority nodes are in the winning part, the primary must be demoted. Only checking ALL other members allows detecting such a situation. - What if node/pod gets terminated while DCS is down? If DCS isn’t accessible, the check “are ALL other cluster members accessible?†is executed every cycle of the heartbeat loop (every ``loop_wait`` seconds). If pod/node is terminated, the check will fail and Postgres will be demoted to a read-only and will not recover until DCS is restored. - What if all members of the Patroni cluster are lost while DCS is down? Patroni could be configured to create the new replica from the backup even when the cluster doesn't have a leader. But, if the new member isn't present in the ``/failsafe`` key, it will not be able to grab the leader lock and promote. - What will happen if the primary lost access to DCS while replicas didn't? The primary will execute the failsafe code and contact all known replicas. 
These replicas will use this information as an indicator that the primary is alive and will not start the leader race even if the leader lock in DCS has expired. - How to enable the Failsafe Mode? Before enabling the ``failsafe_mode`` please make sure that Patroni version on all members is up-to-date. After that, you can use either the ``PATCH /config`` :ref:`REST API ` or :ref:`patronictl edit-config -s failsafe_mode=true ` patroni-3.2.2/docs/dynamic_configuration.rst000066400000000000000000000246741455170150700212510ustar00rootroot00000000000000.. _dynamic_configuration: ============================== Dynamic Configuration Settings ============================== Dynamic configuration is stored in the DCS (Distributed Configuration Store) and applied on all cluster nodes. In order to change the dynamic configuration you can use either :ref:`patronictl_edit_config` tool or Patroni :ref:`REST API `. - **loop\_wait**: the number of seconds the loop will sleep. Default value: 10, minimum possible value: 1 - **ttl**: the TTL to acquire the leader lock (in seconds). Think of it as the length of time before initiation of the automatic failover process. Default value: 30, minimum possible value: 20 - **retry\_timeout**: timeout for DCS and PostgreSQL operation retries (in seconds). DCS or network issues shorter than this will not cause Patroni to demote the leader. Default value: 10, minimum possible value: 3 .. warning:: when changing values of **loop_wait**, **retry_timeout**, or **ttl** you have to follow the rule: .. code-block:: python loop_wait + 2 * retry_timeout <= ttl - **maximum\_lag\_on\_failover**: the maximum bytes a follower may lag to be able to participate in leader election. - **maximum\_lag\_on\_syncnode**: the maximum bytes a synchronous follower may lag before it is considered as an unhealthy candidate and swapped by healthy asynchronous follower. 
Patroni utilizes the max replica LSN if there is more than one follower, otherwise it will use the leader's current WAL LSN. Default is -1, Patroni will not take action to swap an unhealthy synchronous follower when the value is set to 0 or below. Please set the value high enough so Patroni won't swap the synchronous follower frequently during high transaction volume.
See :ref:`replication modes documentation ` for details. - **synchronous\_mode\_strict**: prevents disabling synchronous replication if no synchronous replicas are available, blocking all client writes to the primary. See :ref:`replication modes documentation ` for details. - **failsafe\_mode**: Enables :ref:`DCS Failsafe Mode `. Defaults to `false`. - **postgresql**: - **use\_pg\_rewind**: whether or not to use pg_rewind. Defaults to `false`. - **use\_slots**: whether or not to use replication slots. Defaults to `true` on PostgreSQL 9.4+. - **recovery\_conf**: additional configuration settings written to recovery.conf when configuring follower. There is no recovery.conf anymore in PostgreSQL 12, but you may continue using this section, because Patroni handles it transparently. - **parameters**: list of configuration settings for Postgres. - **pg\_hba**: list of lines that Patroni will use to generate ``pg_hba.conf``. Patroni ignores this parameter if ``hba_file`` PostgreSQL parameter is set to a non-default value. - **- host all all 0.0.0.0/0 md5** - **- host replication replicator 127.0.0.1/32 md5**: A line like this is required for replication. - **pg\_ident**: list of lines that Patroni will use to generate ``pg_ident.conf``. Patroni ignores this parameter if ``ident_file`` PostgreSQL parameter is set to a non-default value. - **- mapname1 systemname1 pguser1** - **- mapname1 systemname2 pguser2** - **standby\_cluster**: if this section is defined, we want to bootstrap a standby cluster. - **host**: an address of remote node - **port**: a port of remote node - **primary\_slot\_name**: which slot on the remote node to use for replication. This parameter is optional, the default value is derived from the instance name (see function `slot_name_from_member_name`). 
- **create\_replica\_methods**: an ordered list of methods that can be used to bootstrap standby leader from the remote primary, can be different from the list defined in :ref:`postgresql_settings` - **restore\_command**: command to restore WAL records from the remote primary to nodes in a standby cluster, can be different from the list defined in :ref:`postgresql_settings` - **archive\_cleanup\_command**: cleanup command for standby leader - **recovery\_min\_apply\_delay**: how long to wait before actually apply WAL records on a standby leader - **slots**: define permanent replication slots. These slots will be preserved during switchover/failover. Permanent slots that don't exist will be created by Patroni. With PostgreSQL 11 onwards permanent physical slots are created on all nodes and their position is advanced every **loop_wait** seconds. For PostgreSQL versions older than 11 permanent physical replication slots are maintained only on the current primary. The logical slots are copied from the primary to a standby with restart, and after that their position advanced every **loop_wait** seconds (if necessary). Copying logical slot files performed via ``libpq`` connection and using either rewind or superuser credentials (see **postgresql.authentication** section). There is always a chance that the logical slot position on the replica is a bit behind the former primary, therefore application should be prepared that some messages could be received the second time after the failover. The easiest way of doing so - tracking ``confirmed_flush_lsn``. Enabling permanent replication slots requires **postgresql.use_slots** to be set to ``true``. If there are permanent logical replication slots defined Patroni will automatically enable the ``hot_standby_feedback``. Since the failover of logical replication slots is unsafe on PostgreSQL 9.6 and older and PostgreSQL version 10 is missing some important functions, the feature only works with PostgreSQL 11+. 
- **my\_slot\_name**: the name of the permanent replication slot. If the permanent slot name matches with the name of the current node it will not be created on this node. If you add a permanent physical replication slot which name matches the name of a Patroni member, Patroni will ensure that the slot that was created is not removed even if the corresponding member becomes unresponsive, situation which would normally result in the slot's removal by Patroni. Although this can be useful in some situations, such as when you want replication slots used by members to persist during temporary failures or when importing existing members to a new Patroni cluster (see :ref:`Convert a Standalone to a Patroni Cluster ` for details), caution should be exercised by the operator that these clashes in names are not persisted in the DCS, when the slot is no longer required, due to its effect on normal functioning of Patroni. - **type**: slot type. Could be ``physical`` or ``logical``. If the slot is logical, you have to additionally define ``database`` and ``plugin``. - **database**: the database name where logical slots should be created. - **plugin**: the plugin name for the logical slot. - **ignore\_slots**: list of sets of replication slot properties for which Patroni should ignore matching slots. This configuration/feature/etc. is useful when some replication slots are managed outside of Patroni. Any subset of matching properties will cause a slot to be ignored. - **name**: the name of the replication slot. - **type**: slot type. Can be ``physical`` or ``logical``. If the slot is logical, you may additionally define ``database`` and/or ``plugin``. - **database**: the database name (when matching a ``logical`` slot). - **plugin**: the logical decoding plugin (when matching a ``logical`` slot). Note: **slots** is a hashmap while **ignore_slots** is an array. For example: .. 
code:: YAML slots: permanent_logical_slot_name: type: logical database: my_db plugin: test_decoding permanent_physical_slot_name: type: physical ... ignore_slots: - name: ignored_logical_slot_name type: logical database: my_db plugin: test_decoding - name: ignored_physical_slot_name type: physical ... Note: if cluster topology is static (fixed number of nodes that never change their names) you can configure permanent physical replication slots with names corresponding to names of nodes to avoid recycling of WAL files while replica is temporary down: .. code:: YAML slots: node_name1: type: physical node_name2: type: physical node_name3: type: physical ... .. warning:: Permanent replication slots are synchronized only from the ``primary``/``standby_leader`` to replica nodes. That means, applications are supposed to be using them only from the leader node. Using them on replica nodes will cause indefinite growth of ``pg_wal`` on all other nodes in the cluster. An exception to that rule are permanent physical slots that match the Patroni member names, if you happen to configure any. Those will be synchronized among all nodes as they are used for replication among them. patroni-3.2.2/docs/existing_data.rst000066400000000000000000000163421455170150700175120ustar00rootroot00000000000000.. _existing_data: Convert a Standalone to a Patroni Cluster ========================================= This section describes the process for converting a standalone PostgreSQL instance into a Patroni cluster. To deploy a Patroni cluster without using a pre-existing PostgreSQL instance, see :ref:`Running and Configuring ` instead. Procedure --------- You can find below an overview of steps for converting an existing Postgres cluster to a Patroni managed cluster. In the steps we assume all nodes that are part of the existing cluster are currently up and running, and that you *do not* intend to change Postgres configuration while the migration is ongoing. The steps: #. 
Create the Postgres users as explained for :ref:`authentication ` section of the Patroni configuration. You can find sample SQL commands to create the users in the code block below, in which you need to replace the usernames and passwords as per your environment. If you already have the relevant users, then you can skip this step. .. code-block:: sql -- Patroni superuser -- Replace PATRONI_SUPERUSER_USERNAME and PATRONI_SUPERUSER_PASSWORD accordingly CREATE USER PATRONI_SUPERUSER_USERNAME WITH SUPERUSER ENCRYPTED PASSWORD 'PATRONI_SUPERUSER_PASSWORD'; -- Patroni replication user -- Replace PATRONI_REPLICATION_USERNAME and PATRONI_REPLICATION_PASSWORD accordingly CREATE USER PATRONI_REPLICATION_USERNAME WITH REPLICATION ENCRYPTED PASSWORD 'PATRONI_REPLICATION_PASSWORD'; -- Patroni rewind user, if you intend to enable use_pg_rewind in your Patroni configuration -- Replace PATRONI_REWIND_USERNAME and PATRONI_REWIND_PASSWORD accordingly CREATE USER PATRONI_REWIND_USERNAME WITH ENCRYPTED PASSWORD 'PATRONI_REWIND_PASSWORD'; GRANT EXECUTE ON function pg_catalog.pg_ls_dir(text, boolean, boolean) TO PATRONI_REWIND_USERNAME; GRANT EXECUTE ON function pg_catalog.pg_stat_file(text, boolean) TO PATRONI_REWIND_USERNAME; GRANT EXECUTE ON function pg_catalog.pg_read_binary_file(text) TO PATRONI_REWIND_USERNAME; GRANT EXECUTE ON function pg_catalog.pg_read_binary_file(text, bigint, bigint, boolean) TO PATRONI_REWIND_USERNAME; #. Perform the following steps on all Postgres nodes. Perform all steps on one node before proceeding with the next node. Start with the primary node, then proceed with each standby node: #. If you are running Postgres through systemd, then disable the Postgres systemd unit. This is performed as Patroni manages starting and stopping the Postgres daemon. #. Create a YAML configuration file for Patroni. You can use :ref:`Patroni configuration generation and validation tooling ` for that. 
* **Note (specific for the primary node):** If you have replication slots being used for replication between cluster members, then it is recommended that you enable ``use_slots`` and configure the existing replication slots as permanent via the ``slots`` configuration item. Be aware that Patroni automatically creates replication slots for replication between members, and drops replication slots that it does not recognize, when ``use_slots`` is enabled. The idea of using permanent slots here is to allow your existing slots to persist while the migration to Patroni is in progress. See :ref:`YAML Configuration Settings ` for details. #. Start Patroni using the ``patroni`` systemd service unit. It automatically detects that Postgres is already running and starts monitoring the instance. #. Hand over Postgres "start up procedure" to Patroni. In order to do that you need to restart the cluster members through :ref:`patronictl restart cluster-name member-name ` command. For minimal downtime you might want to split this step into: #. Immediate restart of the standby nodes. #. Scheduled restart of the primary node within a maintenance window. #. If you configured permanent slots in step ``1.2.``, then you should remove them from ``slots`` configuration through :ref:`patronictl edit-config cluster-name member-name ` command once the ``restart_lsn`` of the slots created by Patroni is able to catch up with the ``restart_lsn`` of the original slots for the corresponding members. By removing the slots from ``slots`` configuration you will allow Patroni to drop the original slots from your cluster once they are not needed anymore. You can find below an example query to check the ``restart_lsn`` of a couple slots, so you can compare them: .. code-block:: sql -- Assume original_slot_for_member_x is the name of the slot in your original -- cluster for replicating changes to member X, and slot_for_member_x is the -- slot created by Patroni for that purpose. 
You need restart_lsn of -- slot_for_member_x to be >= restart_lsn of original_slot_for_member_x SELECT slot_name, restart_lsn FROM pg_replication_slots WHERE slot_name IN ( 'original_slot_for_member_x', 'slot_for_member_x' ) .. _major_upgrade: Major Upgrade of PostgreSQL Version =================================== The only possible way to do a major upgrade currently is: #. Stop Patroni #. Upgrade PostgreSQL binaries and perform `pg_upgrade `_ on the primary node #. Update patroni.yml #. Remove the initialize key from DCS or wipe complete cluster state from DCS. The second one could be achieved by running :ref:`patronictl remove cluster-name ` . It is necessary because pg_upgrade runs initdb which actually creates a new database with a new PostgreSQL system identifier. #. If you wiped the cluster state in the previous step, you may wish to copy patroni.dynamic.json from old data dir to the new one. It will help you to retain some PostgreSQL parameters you had set before. #. Start Patroni on the primary node. #. Upgrade PostgreSQL binaries, update patroni.yml and wipe the data_dir on standby nodes. #. Start Patroni on the standby nodes and wait for the replication to complete. Running pg_upgrade on standby nodes is not supported by PostgreSQL. If you know what you are doing, you can try the rsync procedure described in https://www.postgresql.org/docs/current/pgupgrade.html instead of wiping data_dir on standby nodes. The safest way is however to let Patroni replicate the data for you. FAQ --- - During Patroni startup, Patroni complains that it cannot bind to the PostgreSQL port. You need to verify ``listen_addresses`` and ``port`` in ``postgresql.conf`` and ``postgresql.listen`` in ``patroni.yml``. Don't forget that ``pg_hba.conf`` should allow such access. 
- After asking Patroni to restart the node, PostgreSQL displays the error message ``could not open configuration file "/etc/postgresql/10/main/pg_hba.conf": No such file or directory`` It can mean various things depending on how you manage PostgreSQL configuration. If you specified `postgresql.config_dir`, Patroni generates the ``pg_hba.conf`` based on the settings in the :ref:`bootstrap ` section only when it bootstraps a new cluster. In this scenario the ``PGDATA`` was not empty, therefore no bootstrap happened. This file must exist beforehand. patroni-3.2.2/docs/faq.rst000066400000000000000000000516011455170150700154330ustar00rootroot00000000000000.. _faq: FAQ === In this section you will find answers for the most frequently asked questions about Patroni. Each sub-section attempts to focus on different kinds of questions. We hope that this helps you to clarify most of your questions. If you still have further concerns or find yourself facing an unexpected issue, please refer to :ref:`chatting` and :ref:`reporting_bugs` for instructions on how to get help or report issues. Comparison with other HA solutions ---------------------------------- Why does Patroni require a separate cluster of DCS nodes while other solutions like ``repmgr`` do not? There are different ways of implementing HA solutions, each of them with their pros and cons. Software like ``repmgr`` performs communication among the nodes to decide when actions should be taken. Patroni on the other hand relies on the state stored in the DCS. The DCS acts as a source of truth for Patroni to decide what it should do. While having a separate DCS cluster can make you bloat your architecture, this approach also makes it less likely for split-brain scenarios to happen in your Postgres cluster. What is the difference between Patroni and other HA solutions in regards to Postgres management? Patroni does not just manage the high availability of the Postgres cluster but also manages Postgres itself. 
If Postgres nodes do not exist yet, it takes care of bootstrapping the primary and the standby nodes, and also manages Postgres configuration of the nodes. If the Postgres nodes already exist, Patroni will take over management of the cluster. Besides the above, Patroni also has self-healing capabilities. In other words, if a primary node fails, Patroni will not only fail over to a replica, but also attempt to rejoin the former primary as a replica of the new primary. Similarly, if a replica fails, Patroni will attempt to rejoin that replica. That is why we call Patroni a "template for HA solutions". It goes further than just managing physical replication: it manages Postgres as a whole. DCS --- Can I use the same ``etcd`` cluster to store data from two or more Patroni clusters? Yes, you can! Information about a Patroni cluster is stored in the DCS under a path prefixed with the ``namespace`` and ``scope`` Patroni settings. As long as you do not have conflicting namespace and scope across different Patroni clusters, you should be able to use the same DCS cluster to store information from multiple Patroni clusters. What occurs if I attempt to use the same combination of ``namespace`` and ``scope`` for different Patroni clusters that point to the same DCS cluster? The second Patroni cluster that attempts to use the same ``namespace`` and ``scope`` will not be able to manage Postgres because it will find information related with that same combination in the DCS, but with an incompatible Postgres system identifier. The mismatch on the system identifier causes Patroni to abort the management of the second cluster, as it assumes that refers to a different cluster and that the user has misconfigured Patroni. Make sure to use different ``namespace`` / ``scope`` when dealing with different Patroni clusters that share the same DCS cluster. What occurs if I lose my DCS cluster? The DCS is used to store basically status and the dynamic configuration of the Patroni cluster. 
The very first consequence is that all the Patroni clusters that rely on that DCS will go to read-only mode -- unless :ref:`dcs_failsafe_mode` is enabled. What should I do if I lose my DCS cluster? There are three possible outcomes upon losing your DCS cluster: 1. The DCS cluster is fully recovered: this requires no action from the Patroni side. Once the DCS cluster is recovered, Patroni should be able to recover too; 2. The DCS cluster is re-created in place, and the endpoints remain the same. No changes are required on the Patroni side; 3. A new DCS cluster is created with different endpoints. You will need to update the DCS endpoints in the Patroni configuration of each Patroni node. If you face scenario ``2.`` or ``3.`` Patroni will take care of creating the status information again based on the current status of the cluster, and recreate the dynamic configuration on the DCS based on a backup file named ``patroni.dynamic.json`` which is stored inside the Postgres data directory of each member of the Patroni cluster. What occurs if I lose majority in my DCS cluster? The DCS will become unresponsive, which will cause Patroni to demote the current read/write Postgres node. Remember: Patroni relies on the state of the DCS to take actions on the cluster. You can use the :ref:`dcs_failsafe_mode` to alleviate that situation. patronictl ---------- Do I need to run :ref:`patronictl` in the Patroni host? No, you do not need to do that. Running :ref:`patronictl` in the Patroni host is handy if you have access to the Patroni host because you can use the very same configuration file from the ``patroni`` agent for the :ref:`patronictl` application. However, :ref:`patronictl` is basically a client and it can be executed from remote machines. You just need to provide it with enough configuration so it can reach the DCS and the REST API of the Patroni member(s). Why did the information from one of my Patroni members disappear from the output of :ref:`patronictl_list` command? 
Information shown by :ref:`patronictl_list` is based on the contents of the DCS. If information about a member disappeared from the DCS it is very likely that the Patroni agent on that node is not running anymore, or it is not able to communicate with the DCS. As the member is not able to update the information, the information eventually expires from the DCS, and consequently the member is not shown anymore in the output of :ref:`patronictl_list`. Why is the information about one of my Patroni members not up-to-date in the output of :ref:`patronictl_list` command? Information shown by :ref:`patronictl_list` is based on the contents of the DCS. By default, that information is updated by Patroni roughly every ``loop_wait`` seconds. In other words, even if everything is normally functional you may still see a "delay" of up to ``loop_wait`` seconds in the information stored in the DCS. Be aware that that is not a rule, though. Some operations performed by Patroni cause it to immediately update the DCS information. Configuration ------------- What is the difference between dynamic configuration and local configuration? Dynamic configuration (or global configuration) is the configuration stored in the DCS, and which is applied to all members of the Patroni cluster. This is primarily where you should store your configuration. Settings that are specific to a node, or settings that you would like to overwrite the global configuration with, you should set only on the desired Patroni member as a local configuration. That local configuration can be specified either through the configuration file or through environment variables. See more in :ref:`patroni_configuration`. What are the types of configuration in Patroni, and what is the precedence? 
The types are: * Dynamic configuration: applied to all members; * Local configuration: applied to the local member, overrides dynamic configuration; * Environment configuration: applied to the local member, overrides both dynamic and local configuration. **Note:** some Postgres GUCs can only be set globally, i.e., through dynamic configuration. Besides that, there are GUCs which Patroni enforces a hard-coded value. See more in :ref:`patroni_configuration`. Is there any facility to help me create my Patroni configuration file? Yes, there is. You can use ``patroni --generate-sample-config`` or ``patroni --generate-config`` commands to generate a sample Patroni configuration or a Patroni configuration based on an existing Postgres instance, respectively. Please refer to :ref:`generate_sample_config` and :ref:`generate_config` for more details. I changed my parameters under ``bootstrap.dcs`` configuration but Patroni is not applying the changes to the cluster members. What is wrong? The values configured under ``bootstrap.dcs`` are only used when bootstrapping a fresh cluster. Those values will be written to the DCS during the bootstrap. After the bootstrap phase finishes, you will only be able to change the dynamic configuration through the DCS. Refer to the next question for more details. How can I change my dynamic configuration? You need to change the configuration in the DCS. That is accomplished either through: * :ref:`patronictl_edit_config`; or * A ``PATCH`` request to :ref:`config_endpoint`. How can I change my local configuration? You need to change the configuration file of the corresponding Patroni member and signal the Patroni agent with ``SIGHUP``. 
You can do that using either of these approaches: * Send a ``POST`` request to the REST API :ref:`reload_endpoint`; or * Run :ref:`patronictl_reload`; or * Locally signal the Patroni process with ``SIGHUP``: * If you started Patroni through systemd, you can use the command ``systemctl reload PATRONI_UNIT.service``, ``PATRONI_UNIT`` being the name of the Patroni service; or * If you started Patroni through other means, you will need to identify the ``patroni`` process and run ``kill -s HUP PID``, ``PID`` being the process ID of the ``patroni`` process. **Note:** there are cases where a reload through the :ref:`patronictl_reload` may not work: * Expired REST API certificates: you can mitigate that by using the ``-k`` option of the :ref:`patronictl`; * Wrong credentials: for example when changing ``restapi`` or ``ctl`` credentials in the configuration file, and using that same configuration file for Patroni and :ref:`patronictl`. How can I change my environment configuration? The environment configuration is only read by Patroni during startup. With that in mind, if you change the environment configuration you will need to restart the corresponding Patroni agent. Take care to not cause a failover in the cluster! You might be interested in checking :ref:`patronictl_pause`. What occurs if I change a Postgres GUC that requires a reload? When you change the dynamic or the local configuration as explained in the previous questions, Patroni will take care of reloading the Postgres configuration for you. What occurs if I change a Postgres GUC that requires a restart? Patroni will mark the affected members with a flag of ``pending restart``. It is up to you to determine when and how to restart the members. That can be accomplished either through: * :ref:`patronictl_restart`; or * A ``POST`` request to :ref:`restart_endpoint`. **Note:** some Postgres GUCs require a special management in terms of the order for restarting the Postgres nodes. 
Refer to :ref:`shared_memory_gucs` for more details. What is the difference between ``etcd`` and ``etcd3`` in Patroni configuration? ``etcd`` uses the API version 2 of ``etcd``, while ``etcd3`` uses the API version 3 of ``etcd``. Be aware that information stored by the API version 2 is not manageable by API version 3 and vice-versa. We recommend that you configure ``etcd3`` instead of ``etcd`` because: * API version 2 is disabled by default from Etcd v3.4 onward; * API version 2 will be completely removed on Etcd v3.6. I have ``use_slots`` enabled in my Patroni configuration, but when a cluster member goes offline for some time, the replication slot used by that member is dropped on the upstream node. What can I do to avoid that issue? You can configure a permanent physical replication slot for the members. Since Patroni ``3.2.0`` it is now possible to have member slots as permanent slots managed by Patroni. Patroni will create the permanent physical slots on all nodes, and make sure to not remove the slots, as well as to advance the slots' LSN on all nodes according to the LSN that has been consumed by the member. Later, if you decide to remove the corresponding member, it's **your responsibility** to adjust the permanent slots configuration, otherwise Patroni will keep the slots around forever. **Note:** on Patroni older than ``3.2.0`` you could still have member slots configured as permanent physical slots, however they would be managed only on the current leader. That is, in case of failover/switchover these slots would be created on the new leader, but that wouldn't guarantee that it had all WAL segments for the absent node. **Note:** even with Patroni ``3.2.0`` there might be a small race condition. In the very beginning, when the slot is created on the replica it could be ahead of the same slot on the leader and in case nobody is consuming the slot there is still a chance that some files could be missing after failover. 
With that in mind, it is recommended that you configure continuous archiving, which makes it possible to restore required WALs or perform PITR. What is the difference between ``loop_wait``, ``retry_timeout`` and ``ttl``? Patroni performs what we call a HA cycle from time to time. On each HA cycle it takes care of performing a series of checks on the cluster to determine its healthiness, and depending on the status it may take actions, like failing over to a standby. ``loop_wait`` determines for how long, in seconds, Patroni should sleep before performing a new cycle of HA checks. ``retry_timeout`` sets the timeout for retry operations on the DCS and on Postgres. For example: if the DCS is unresponsive for more than ``retry_timeout`` seconds, Patroni might demote the primary node as a security action. ``ttl`` sets the lease time on the ``leader`` lock in the DCS. If the current leader of the cluster is not able to renew the lease during its HA cycles for longer than ``ttl``, then the lease will expire and that will trigger a ``leader race`` in the cluster. **Note:** when modifying these settings, please keep in mind that Patroni enforces the rule and minimal values described in :ref:`dynamic_configuration` section of the docs. Postgres management ------------------- Can I change Postgres GUCs directly in Postgres configuration? You can, but you should avoid that. Postgres configuration is managed by Patroni, and attempts to edit the configuration files may end up being frustrated by Patroni as it may eventually overwrite them. There are a few options available to overcome the management performed by Patroni: * Change Postgres GUCs through ``$PGDATA/postgresql.base.conf``; or * Define a ``postgresql.custom_conf`` which will be used instead of ``postgresql.base.conf`` so you can manage that externally; or * Change GUCs using ``ALTER SYSTEM`` / ``ALTER DATABASE`` / ``ALTER USER``. 
You can find more information about that in the section :ref:`important_configuration_rules`. In any case we recommend that you manage all the Postgres configuration through Patroni. That will centralize the management and make it easier to debug Patroni when needed. Can I restart Postgres nodes directly? No, you should **not** attempt to manage Postgres directly! Any attempt of bouncing the Postgres server without Patroni can lead your cluster to face failovers. If you need to manage the Postgres server, do that through the ways exposed by Patroni. Is Patroni able to take over management of an already existing Postgres cluster? Yes, it can! Please refer to :ref:`existing_data` for detailed instructions. How does Patroni manage Postgres? Patroni takes care of bringing Postgres up and down by running the Postgres binaries, like ``pg_ctl`` and ``postgres``. With that in mind you **MUST** disable any other sources that could manage the Postgres clusters, like the systemd units, e.g. ``postgresql.service``. Only Patroni should be able to start, stop and promote Postgres instances in the cluster. Not doing so may result in split-brain scenarios. For example: if the node running as a primary failed and the unit ``postgresql.service`` is enabled, it may bring Postgres back up and cause a split-brain. Concepts and requirements ------------------------- Which are the applications that make part of Patroni? Patroni basically ships a couple applications: * ``patroni``: This is the Patroni agent, which takes care of managing a Postgres node; * ``patronictl``: This is a command-line utility used to interact with a Patroni cluster (perform switchovers, restarts, changes in the configuration, etc.). Please find more information in :ref:`patronictl`. What is a ``standby cluster`` in Patroni? It is a cluster that does not have any primary Postgres node running, i.e., there is no read/write member in the cluster. 
These kinds of clusters exist to replicate data from another cluster and are usually useful when you want to replicate data across data centers. There will be a leader in the cluster which will be a standby in charge of replicating changes from a remote Postgres node. Then, there will be a set of standbys configured with cascading replication from such leader member. **Note:** the standby cluster doesn't know anything about the source cluster which it is replicating from -- it can even use ``restore_command`` instead of WAL streaming, and may use an absolutely independent DCS cluster. Refer to :ref:`standby_cluster` for more details. What is a ``leader`` in Patroni? A ``leader`` in Patroni is like a coordinator of the cluster. In a regular Patroni cluster, the ``leader`` will be the read/write node. In a standby Patroni cluster, the ``leader`` (AKA ``standby leader``) will be in charge of replicating from a remote Postgres node, and cascading those changes to the other members of the standby cluster. Does Patroni require a minimum number of Postgres nodes in the cluster? No, you can run Patroni with any number of Postgres nodes. Remember: Patroni is decoupled from the DCS. What does ``pause`` mean in Patroni? Pause is an operation exposed by Patroni so the user can ask Patroni to step back in regards to Postgres management. That is mainly useful when you want to perform maintenance on the cluster, and would like to avoid that Patroni takes decisions related with HA, like failing over to a standby when you stop the primary. You can find more information about that in :ref:`pause`. Automatic failover ------------------ How does the automatic failover mechanism of Patroni work? Patroni automatic failover is based on what we call ``leader race``. Patroni stores the cluster's status in the DCS, among them a ``leader`` lock which holds the name of the Patroni member which is the current ``leader`` of the cluster. 
That ``leader`` lock has a time-to-live associated with it. If the leader node fails to update the lease of the ``leader`` lock in time, the key will eventually expire from the DCS. When the ``leader`` lock expires, it triggers what Patroni calls a ``leader race``: all nodes start performing checks to determine if they are the best candidates for taking over the ``leader`` role. Some of these checks include calls to the REST API of all other Patroni members. All Patroni members that find themselves as the best candidate for taking over the ``leader`` lock will attempt to do so. The first Patroni member that is able to take the ``leader`` lock will promote itself to a read/write node (or ``standby leader``), and the others will be configured to follow it. Can I temporarily disable automatic failover in the Patroni cluster? Yes, you can! You can achieve that by temporarily pausing the cluster. This is typically useful for performing maintenance. When you want to resume the automatic failover of the cluster, you just need to unpause it. You can find more information about that in :ref:`pause`. Bootstrapping and standbys creation ----------------------------------- How does Patroni create a primary Postgres node? What about a standby Postgres node? By default Patroni will use ``initdb`` to bootstrap a fresh cluster, and ``pg_basebackup`` to create standby nodes from a copy of the ``leader`` member. You can customize that behavior by writing your custom bootstrap methods, and your custom replica creation methods. Custom methods are usually useful when you want to restore backups created by backup tools like pgBackRest or Barman, for example. For detailed information please refer to :ref:`custom_bootstrap` and :ref:`custom_replica_creation`. Monitoring ---------- How can I monitor my Patroni cluster? 
Patroni exposes a couple handy endpoints in its :ref:`rest_api`: * ``/metrics``: exposes monitoring metrics in a format that can be consumed by Prometheus; * ``/patroni``: exposes the status of the cluster in a JSON format. The information shown here is very similar to what is shown by the ``/metrics`` endpoint. You can use those endpoints to implement monitoring checks. patroni-3.2.2/docs/ha_loop_diagram.dot000066400000000000000000000177341455170150700177600ustar00rootroot00000000000000// Graphviz source for ha_loop_diagram.png // recompile with: // dot -Tpng ha_loop_diagram.dot -o ha_loop_diagram.png digraph G { rankdir=TB; fontname="sans-serif"; penwidth="0.3"; layout="dot"; newrank=true; edge [fontname="sans-serif", fontsize=12, color=black, fontcolor=black]; node [fontname=serif, fontsize=12, fillcolor=white, color=black, fontcolor=black, style=filled]; "start" [label=Start, shape="rectangle", fillcolor="green"] "start" -> "load_cluster_from_dcs"; "update_member" [label="Persist node state in DCS"] "update_member" -> "start" subgraph cluster_run_cycle { label="run_cycle" "load_cluster_from_dcs" [label="Load cluster from DCS"]; "touch_member" [label="Persist node in DCS"]; "cluster.has_member" [shape="diamond", label="Is node registered on DCS?"] "cluster.has_member" -> "touch_member" [label="no" color="red"] "long_action_in_progress?" [shape="diamond" label="Is the PostgreSQL currently being\nstopping/starting/restarting/reinitializing?"] "load_cluster_from_dcs" -> "cluster.has_member"; "touch_member" -> "long_action_in_progress?"; "cluster.has_member" -> "long_action_in_progress?" [label="yes" color="green"]; "long_action_in_progress?" -> "recovering?" [label="no" color="red"] "recovering?" [label="Was cluster recovering and failed?", shape="diamond"]; "recovering?" -> "post_recover" [label="yes" color="green"]; "recovering?" 
-> "data_directory_empty" [label="no" color="red"]; "post_recover" [label="Remove leader key (if I was the leader)"]; "data_directory_empty" [label="Is data folder empty?", shape="diamond"]; "data_directory_empty" -> "cluster_initialize" [label="no" color="red"]; "data_belongs_to_cluster" [label="Does data dir belong to cluster?", shape="diamond"]; "data_belongs_to_cluster" -> "exit" [label="no" color="red"]; "data_belongs_to_cluster" -> "is_healthy" [label="yes" color="green"] "exit" [label="Fail and exit", fillcolor=red]; "cluster_initialize" [label="Is cluster initialized on DCS?" shape="diamond"] "cluster_initialize" -> "cluster.has_leader" [label="no" color="red"] "cluster.has_leader" [label="Does the cluster has leader?", shape="diamond"] "cluster.has_leader" -> "dcs.initialize" [label="no", color="red"] "cluster.has_leader" -> "is_healthy" [label="yes", color="green"] "cluster_initialize" -> "data_belongs_to_cluster" [label="yes" color="green"] "dcs.initialize" [label="Initialize new cluster"]; "dcs.initialize" -> "is_healthy" "is_healthy" [label="Is node healthy?\n(running Postgres)", shape="diamond"]; "recover" [label="Start as read-only\nand set Recover flag"] "is_healthy" -> "recover" [label="no" color="red"]; "is_healthy" -> "cluster.is_unlocked" [label="yes" color="green"]; "cluster.is_unlocked" [label="Does the cluster has a leader?", shape="diamond"] } "post_recover" -> "update_member" "recover" -> "update_member" "long_action_in_progress?" -> "async_has_lock?" [label="yes" color="green"]; "cluster.is_unlocked" -> "unhealthy_is_healthiest" [label="no" color="red"] "cluster.is_unlocked" -> "healthy_has_lock" [label="yes" color="green"] "data_directory_empty" -> "bootstrap.is_unlocked" [label="yes" color="green"] subgraph cluster_async { label = "Long action in progress\n(Start/Stop/Restart/Reinitialize)" "async_has_lock?" [label="Do I have the leader lock?", shape="diamond"] "async_update_lock" [label="Renew leader lock"] "async_has_lock?" 
-> "async_update_lock" [label="yes" color="green"] } "async_update_lock" -> "update_member" "async_has_lock?" -> "update_member" [label="no" color="red"] subgraph cluster_bootstrap { label = "Node bootstrap"; "bootstrap.is_unlocked" [label="Does the cluster has a leader?", shape="diamond"] "bootstrap.is_initialized" [label="Does the cluster has an initialize key?", shape="diamond"] "bootstrap.is_unlocked" -> "bootstrap.is_initialized" [label="no" color="red"] "bootstrap.is_unlocked" -> "bootstrap.select_node" [label="yes" color="green"] "bootstrap.select_node" [label="Select a node to take a backup from"] "bootstrap.do_bootstrap" [label="Run pg_basebackup\n(async)"] "bootstrap.select_node" -> "bootstrap.do_bootstrap" "bootstrap.is_initialized" -> "bootstrap.initialization_race" [label="no" color="red"] "bootstrap.is_initialized" -> "bootstrap.wait_for_leader" [label="yes" color="green"] "bootstrap.initialization_race" [label="Race for initialize key"] "bootstrap.initialization_race" -> "bootstrap.won_initialize_race?" "bootstrap.won_initialize_race?" [label="Do I won initialize race?", shape="diamond"] "bootstrap.won_initialize_race?" -> "bootstrap.initdb_and_start" [label="yes" color="green"] "bootstrap.won_initialize_race?" -> "bootstrap.wait_for_leader" [label="no" color="red"] "bootstrap.wait_for_leader" [label="Need to wait for leader key"] "bootstrap.initdb_and_start" [label="Run initdb, start postgres and create roles"] "bootstrap.initdb_and_start" -> "bootstrap.success?" "bootstrap.success?" [label="Success", shape="diamond"] "bootstrap.success?" -> "bootstrap.take_leader_key" [label="yes" color="green"] "bootstrap.success?" 
-> "bootstrap.clean" [label="no" color="red"] "bootstrap.clean" [label="Remove initialize key from DCS\nand data directory from filesystem"] "bootstrap.take_leader_key" [label="Take a leader key in DCS"] } "bootstrap.do_bootstrap" -> "update_member" "bootstrap.wait_for_leader" -> "update_member" "bootstrap.clean" -> "update_member" "bootstrap.take_leader_key" -> "update_member" subgraph cluster_process_healthy_cluster { label = "process_healthy_cluster" "healthy_has_lock" [label="Am I the owner of the leader lock?", shape=diamond] "healthy_is_leader" [label="Is Postgres running as master?", shape=diamond] "healthy_no_lock" [label="Follow the leader (async,\ncreate/update recovery.conf and restart if necessary)"] "healthy_has_lock" -> "healthy_no_lock" [label="no" color="red"] "healthy_has_lock" -> "healthy_update_leader_lock" [label="yes" color="green"] "healthy_update_leader_lock" [label="Try to update leader lock"] "healthy_update_leader_lock" -> "healthy_update_success" "healthy_update_success" [label="Success?", shape=diamond] "healthy_update_success" -> "healthy_is_leader" [label="yes" color="green"] "healthy_update_success" -> "healthy_demote" [label="no" color="red"] "healthy_demote" [label="Demote (async,\nrestart in read-only)"] "healthy_failover" [label="Promote Postgres to master"] "healthy_is_leader" -> "healthy_failover" [label="no" color="red"] } "healthy_demote" -> "update_member" "healthy_is_leader" -> "update_member" [label="yes" color="green"] "healthy_failover" -> "update_member" "healthy_no_lock" -> "update_member" subgraph cluster_process_unhealthy_cluster { label = "process_unhealthy_cluster" "unhealthy_is_healthiest" [label="Am I the healthiest node?", shape="diamond"] "unhealthy_is_healthiest" -> "unhealthy_leader_race" [label="yes", color="green"] "unhealthy_leader_race" [label="Try to create leader key"] "unhealthy_leader_race" -> "unhealthy_acquire_lock" "unhealthy_acquire_lock" [label="Was I able to get the lock?", shape="diamond"] 
"unhealthy_is_leader" [label="Is Postgres running as master?", shape=diamond] "unhealthy_acquire_lock" -> "unhealthy_is_leader" [label="yes" color="green"] "unhealthy_is_leader" -> "unhealthy_promote" [label="no" color="red"] "unhealthy_promote" [label="Promote to master"] "unhealthy_is_healthiest" -> "unhealthy_follow" [label="no" color="red"] "unhealthy_follow" [label="try to follow somebody else()"] "unhealthy_acquire_lock" -> "unhealthy_follow" [label="no" color="red"] } "unhealthy_follow" -> "update_member" "unhealthy_promote" -> "update_member" "unhealthy_is_leader" -> "update_member" [label="yes" color="green"] } patroni-3.2.2/docs/ha_loop_diagram.png000066400000000000000000017655351455170150700177700ustar00rootroot00000000000000‰PNG  IHDR „ :î&¶bKGDÿÿÿ ½§“ IDATxœìÝ|Ýu}/ðW’¦I÷Ð4M›¤iùÕŠ(„9ƒGÃ[ü ^™ëîÜ.^眛w»»ÎéÃm¢ó±‰:':çd¨üÚ¼Ì!RP˜eŒß’´MÓpú‹¦i›äþç¬! h{šöù|<Î#ŸÏ9Ÿó=ïwmSÂywÕÐÐÐP7ª+]/Œ@À8#0ÎL¨tDzÎÎÎüøÇ?®tG´3Î8#---•.Ž(UCCCC•.àXõ¶·½-ßøÆ7*]Æí­o}kþáþ¡ÒeÀ¥ºÒË’•I†ÜF½­üé¯0Œ@À8#0΄Œ3!ãŒ@À8#0΄Œ3!ãŒ@À8#0΄Œ3!ãŒ@À8#0΄ŒwCI®NrV’YI&%9-Éï'ùÏ眭: õŽ×€cœ@Àx÷ÇIþ1ÉUIÖ'ÙäÓIþ%É©¬ 8d&Tº^¢¿Nr’¹?Ý×'yM’¿O²¬RE‡’ !ãÝ®$3F¹i’¡ýöUû}-ÝÞ³ßã×'i˳’–$ïO²ã9×,=ï‰$&)ìwß¼pP„Œw+’ü^’âÏ87´ß×Òí‹û=þ«IÞ•äé$wýôëÿã¿•äŠ$=Iþù¼pPL¨t¼D_Lò‘$K’œ™ä‚<îh|×ÙšÈä$Ÿþé5GóûI~á§ëóŸó\à3!`¼›œä/“t$¹$ÏN÷xE’w$Ùö®[ȳSBFóó/ẇжmÛrÍ5×ä7ó7óÐCUº8dL8ZÌHò֟ޞɳáË’|åž»9ɇ“|7Io’Ÿq~ò‹/ó`{ì±Çrà 7äúë¯ÏwÜ‘ÁÁÁ ¥­­-§œrJ¥Ë€CB àh4%ÏN 9õÏ_œä¤$w$Yÿz—iÕÁ/í¥ÈêÕ«sã7æÚk¯Íã?žššš epp°|nÚ´i¬-€ñ®*É“IZžsÿ„$SG9;š;“|#ÉÌýîÛõê9Èžzê©ÜvÛm¹þúëóï|';wîÌĉ³gÏž$φDžkêÔç6G€£ÁÅI>™äeIj’<”äCI~ó9瓬NòsIþ5ɯ'Y›ä’ün’ßO2?Iw’?y‘µŒõ/F1¹åÞ[2gΜTUU¥ºº:ûöíK’rd,!ÍBÆ»ÕIþ6É%y6ÈQ“ää$—&ùíçœý‹$ïH²>IS’Ïýôþ«“\‘äÌ$[“œ˜äzU’¡Ÿž«zÎ×Òýò/Æ`Òßߟ¡¡¡ÔÔԔà â5¯yMŽ;î¸Lš4)õõõ) Ãnrÿœ9sR[[û€C£jhhh´oÃ]tQ®É5É7+]Éê¢deVæ²Ë.˧?ýé|ç;ßIUUÕC>ùÉO¦¶¶6Û·oÏŽ;²}ûö‹Åòú¹÷eúôéåÛ´iÓ2}úôÌœ93Ó§OÏŒ3FJfΜ9l?qâăù+IB¹lذ!]]]Y»vmùÖÕÕ•»ï¾;ÏÙ(2–ŸB¾ùÍgz{{ó•¯|%õW•uëÖe„ c†C¶lÙ’Y³fðKí)­‹ÅâˆðÈöíÛ³uëÖlß¾=Û¶mK±X,ßöîÝ;âºS¦LyÞÀÈóJêëë_ܯG=€—hË–-ÃBk׮ͺuëÒÝÝîîîlذ¡Z¨©©ICCCš››³páÂüä'?É#/D d,Ï „” 
æ¶ÛnË_ÿõ_çºë®KMMMöìÙ3ìLÿaŸÎ±sçÎlݺuXHd´ÛhgúûûG\oÒ¤I##Çw\fÍš•Y³feîܹ™={vy_Z×ÔÔÖ¾8üBžGÖ¯_Ÿžžžlذ!éèè(ï{ì±lß¾½|¾P(¤¡¡!óçÏOkkë°ukkkššš2a„òù‹.º(×䱌Ù_www¾øÅ/æóŸÿ|6oÞœªªªTUU99äHµk×®ç Œ”nO?ýtžzê©lÙ²%›6möû¯ä¹‘Ò×9sædöìÙ£†Hªªª*Ð5/–@pL+‹#Bûï{{{Sz»e]]]G„š;v”Ï …a“=ÚÛۇ훚š2a‚·Vrì( ) |¾¯¯¯<}¤··7›6mʆ ²iÓ¦lܸ1ÝÝݹçž{²qãÆ<ýôÓÞ;eʔ̟??sçÎÍܹsËëã?>óæÍËܹsÓÐÐã?>“&M:Ø­•ü«8âìÝ»7›7oòØÊGooo†††’<;© ìhhhȲe˲jÕªràãÄOÌ´iÓ*ÜŒo“&MÊÂ… ³páŸy¶¿¿¿ügxãÆÙ´iSzzz²yóælܸ1÷ß9X²uëÖaÏ:uê°ÀHCCCæÌ™“yóæÔ×תvŽx!ÀaW,G y”ö]]]H’ÔÖÖföìÙåÀG[[[y] €444¤ªªªÂ]%uuuY°`A,Xð3Ïö÷÷ç©§žJ±X̆ ÒÓÓ3lýðÃç¶ÛnK±XKþ+ V í?õ§´^¸pajkke»!T»wïNOOϘ“=º»»³sçÎòùB¡0ìMÜíííÃöÍÍÍ©©©©`GÀ¡TWW—ùóçgþüùY¶lÙóžÝ½{wyÚȦM›²nݺôööfíÚµéííÍC=” 6ä©§ž*?§ºº:Ç|ù5J·ÆÆÆÌ›7/MMMillÌqÇw¨[8¨B€¶gÏžlÙ²eÌÉ¥uIéÓûKŸÖ¿|ùòa“=š››3uêÔ vŒ'õõõijjJSSÓóž+M)MÙÿëúõësÏ=÷ä‰'žÈÖ­[ËÏ©««Kccã¨SFL#ŽD!@Y±X3äÑÑÑ‘®®® $I&Nœ˜Y³f9Ù£´8ÜöŸ:²|ùò1ÏíØ±#k׮ͺuë²nݺ¬]»6k×®Íúõësã7fíÚµÙ¾}{ùü”)SÒÔÔ” ¤±±qغ¹¹YÈ 8¬BàÑ××7jÈ£´ïêêÊ3Ï_(ÊÁŽÑÍÍÍ©©©©`G/Í´iÓ²téÒ,]ºtÌ3cýÝÙÓÓ“ûï¿Ĥ‘B¡0ìïÊý§"ÍŸ??‹-2e8(Bà(°gÏžlÙ²eDÈcÿ}±X,Ÿ¯¯¯6ÉcùòåÃÞ´ÜÒÒ’)S¦T°#€#äI“Ê?ŽeÛ¶méîîNWWW:;;ÓÝÝîîîüÇüGn¸á†lذ¡|vòäÉiii)Oijj*¯[ZZÒÐÐêêêÃÑ0Î „À8P,Çœì±aÆtvvfpp0I2qâÄÌš5«øxîdÖÖÖ … wpô˜1cFN=õÔœzê©£>¾gÏž¬[·nÄßá?þxn¿ýötvvf×®]I’ÚÚÚ,\¸pØt‘ýoþþJB ÂúúúÆœìÑÓÓ3ìÂIR(Ê“<–-[–+V”÷óçÏOsssjjj*Øû›8qâóNLOOOyÂHGGGž|òÉ<òÈ#ùîw¿›uëÖ•C¥ï‹-öµµµ5MMM™8qâál ¨ 8„úûû³~ýú!Òú‰'žÈÖ­[Ëçëëë‡Mòhkk+ï²hÑ¢Lž<¹‚p°UWWgÁ‚Y°`AÚÚÚF<¾gÏžtvvæÉ'Ÿ,‡E:::rË-·¤£££ü}¤¦¦& ,(‡D–,Y’N8!K–,É’%K2uêÔÃÝp „ÀKP,G„<ößwvv–?Õ}âĉY°`Ay’G{{{.¾øâràcñâÅ™9sf…;àH3qâÄœxâ‰9ñÄG}üé§ŸéèèÈ¿þ뿦»»;I’yóæ•"ûE–,Y’iÓ¦Ζ€ƒ@ ÆÐ××7jÈ£´_»vmöîÝ[>_(Ê“<–-[–+V”÷óçÏOKKKª««+ØG£ãŽ;.Çw\–/_>â±½{÷fíÚµåï]¥Û×¾öµ<òÈ#å°Hé{Xé¶téÒ,[¶,K–,ÉŒ3wKÀà˜Ôßߟõë×9ÙãñÇ϶mÛÊçëëëË“5B8¢ô÷÷gýúõ#B¥ýc=–íÛ·—Ï …QC¥HSSS&L˜PÁŽàذwïÞ¬]»6yàòàƒ¦££#÷ß6nܘäÙïÛK—.ͲeËÊ__ö²—eÞ¼y®Æ«b±8"ä±ÿ¾··7¥·¶ÕÕÕ¥±±qDÈ£´?á„2}úô wü,½½½åÈþçæÈ<­[·&I²lÙ²òíe/{Y–.]š3fT¸r8r „pЋÅ1'{ttt¤»»;ûöí+Ÿ/ #BûïRUUUÁŽ€CiݺuyðÁsÿý÷—Ã">ø`vîÜ™$ijjÊÒ¥KË‘—½ìeY¶lY&Ož\áÊ¡òB8 »wïNOOψGiýè£fÇŽåó…BaÌÉ­­­ijjÊ„ *Øp¤êééɃ>Xž*òÀdÍš5yæ™g’<;QdùòååÛ²eËÒÚÚZáªáð {÷îÍæÍ›G„<öŸòÑÛÛ›Ò[ÎêëëËÁŽýC¥õ‰'ž˜iÓ¦U¸+àh200'Ÿ|2kÖ¬É}÷ÝW¾uww'IæÌ™“ÓN;-¯xÅ+rÚi§å´ÓNË)§œ"€ÊQK àP,G 
y”ö]]]H’ÔÖÖföìÙ£>Jû†††TUUU¸+€dÛ¶m¹ÿþûsï½÷–§‰üû¿ÿ{úúúR[[›N8¡¹;mmm9ãŒ3RWWWér9ʄǜ¾¾¾1'{ôôô¤³³3»ví*Ÿ/ cNö˜?~ZZZR]]]ÁŽ8ìܹ3kÖ¬)‡DV¯^§žz*“'OÎ+_ùÊrHäÜsÏÍܹs+].ãœ@pTéïïÏúõëG„ûìœsÎ99ùä“+].ãŒ@0®‹Å!ý÷L’ÔÕÕ¥±±qDÈ£´^¼xqfΜYáŽ8–lݺ5«W¯Î]wݕիWgõêÕÙ¹sgæÍ›—sÎ9'çž{nÎ=÷Ü,]º4UUU•.—#˜@pÄèëë5äQÚ¯]»6{÷î-Ÿ/ #Bûï[ZZR]]]ÁŽàù dÍš5¹ãŽ;òÃþ0·ÜrKŠÅbæÌ™“3Ï<3gŸ}vÚÛÛóÊW¾Ò¿mF 8,úûû³~ýú1'{<þøãÙ¶m[ù|}}ý¨!Ò~áÂ…©­­­`Gpð äá‡.‡Cn»í¶<õÔS™6mZÎ<óÌ´··§­­-gžy¦ãB€ƒ¢X,Ž:Ù£´~òÉ'Sz»R]]]Çœì±dɒ̘1£Â@å æ¾ûîËí·ßžïÿûùÁ~§žz*3gÎÌÙgŸ×¼æ5iooÏ©§žšªªªJ—Ëa$üLÅbqÔG)ÒÝÝ}ûö•Ï …Q'{”Ö---©®®®`G0> æÈ÷¿ÿýrHdË–-ihhÈyç—×½îu9ï¼ó2wîÜJ—Ê!&Ǹþþþ¬_¿~DÈ£´ì±Ç²}ûöòùB¡0jÈ£ijjÊ„ *Ø[xàÜxã¹å–[òƒü ýýýimmÍ\+Väì³ÏN}}}¥Ëä €£\±XòØßÛÛ›ÒÛˆêêêÒØØ8"äQÚŸp ™>}z…;Ʋk×®Üyç¹å–[rË-·äÞ{ïͤI“ÒÖÖ–ööö´··çôÓOOUUU¥Kå%€q¬X,Ž9Ù£££#ÝÝÝÙ·o_ù|¡PòØßÐÐàapéêêÊÍ7ßœ›o¾9·ÞzkŠÅb,X×½îuyÝë^—öööÌš5«Òeò"„Àj÷îÝéééò(­}ôÑìØ±£|¾P(Œ9Ù£µµ5MMM™0aB;*i`` kÖ¬)O¹ýöÛ300W¼â¹à‚ ò–·¼%§œrJ¥Ëä „@ìÝ»7›7oòØÊGoooJo﩯¯/;öy”Ö'žxb¦M›VᮀñdÛ¶m¹ùæ›sýõ×çŸÿùŸóôÓOgéÒ¥yãߘ7¾ñ9óÌ3S]]]é2ƒ@ÅbqÔGißÕÕ•$ImmmfÏž=jࣴohhHUUU…»ŽVY½zun¼ñÆ\wÝuyøá‡3{öìœþùY±bE^ÿú× ¡aBàÚ½{wzzzÆœìÑÝÝ;w–Ï …QC¥}sssjjj*ØÀp¹á†rã7æûßÿ~&L˜³Ï>;\pAV®\™ùóçWºÄcž@ìgÏž=Ù²e˘“=Jë’úúúçìÑÜÜœ©S§V°#€—¦··77Þxc®¿þúÜrË-éïïÏ™gž™7½éMyË[Þ’æææJ—xLà˜R,Ç yttt¤««+I’‰'fÖ¬YcNö(­Ž»víÊ÷¾÷½\ýõ¹îºëòôÓO笳ÎÊ[ßúÖ¬\¹2 •.ñ˜!ÀQ£¯¯oÔGißÕÕ•gžy¦|¾P(ŒòhmmMsssjjj*ØÀ‘k`` «W¯ÎW¿úÕ|ãßÈÎ;óªW½*+W®Ì[ßúÖüñ•.ñ¨&À¸°gÏžlÙ²eDÈ£´â‰'²uëÖòùúúúQC¥}KKK¦L™RÁŽŽýýý¹ùæ›sÍ5×äÛßþvúúúrÖYgå’K.É[Þò–̘1£Ò%uB8"‹Å1'{lذ!L’Lœ81³fÍò(í/^œ™3gV¸#€cS___n¹å–|õ«_Íõ×_Ÿ$9ï¼ó²råÊ\xá…™:uj…+<:„pÈõõõ9Ù£§§'ÙµkWù|¡Ps²ÇüùóÓÒÒ’êêê vÀ(‹ùö·¿üÇÌ­·ÞšI“&åWõWsÉ%—¤½½ÝÛ½!¼$ýýýY¿~ýˆGiýøãgÛ¶måóõõõcNöhhhÈ¢E‹2yòä vÀ¡°iÓ¦üÓ?ýSþþïÿ>wÞyg.\˜K.¹$ïz×»r 'Tº¼qG €çU,G„<ößwvvfpp0IRWW—ÆÆÆ!ÒzñâÅ™9sf…; Òyä‘üÝßý]¾úÕ¯¦§§'mmmùïÿý¿ç¢‹.Ê´iÓ*]Þ¸ q¾ô¥/e`` Òe/QoooV®\™SN9¥Ò¥ð<úúúF y”ök×®ÍÞ½{Ëç …ÂˆÇþû–––TWWW°#Æ“ÁÁÁÜvÛm¹úê«síµ×fpp0+V¬ÈûÞ÷¾´µµUº¼#š@pĹꪫ²jÕªJ—¼D÷Þ{o’dùòå®àØÕßߟõë×9ÙãñÇ϶mÛÊçëëëG y”ö .Lmmm;àhV,sõÕW窫®Êƒ>˜Ÿû¹ŸËªU«ò¶·½-S§N­tyGàˆ#G€C¯X,Ž:Ù£´~òÉ'SzkH]]]Çœì±dɒ̘1£ÂÀ³~ðƒäóŸÿ|®½öÚLœ81ïxÇ;òÛ¿ýÛY¶lY¥K;bL¨tŒT,G y” 
ÝÝÝÙ·o_ù|¡P(‡<–/_^}”‹-JUUU;€÷êW¿:¯~õ«så•Wæ+_ùJ>ÿùÏçoþæoòK¿ôK¹ì²ËrÁ¤ºººÒeV”@Àa¶{÷îôôôŒy”ö=öX¶oß^>_(†Möhkk6壩©)&xGŸY³fåòË/ÏûßÿþÜzë­¹òÊ+ó¦7½)‹-ʪU«ò¿ñ9î¸ã*]fET •æ‚!®ºêª¬ZµªÒe/ѽ÷Þ›$Y¾|y…+8üŠÅâˆÇþûÞÞÞ”Þ²QWW—ÆÆÆaý'{œp ™>}z…;€#Ç#<’Ï~ö³ùÊW¾’$¹ôÒKsùå—§¹¹¹Â•^!ÀG Ž)@j¥ IDAT!ÀѪX,Ž9Ù£££#ÝÝÝÙ·o_ù|¡PòØßÐЪªª vãÓ¶mÛò¥/})Ÿþô§³aƼõ­oÍ?øÁ¼üå/¯ti‡…Y¡À1£ô+Ÿ“Œe÷îÝéééò(­}ôÑìØ±£|¾P( y´··Û755eÂoÏ€CaÆŒ¹üòËó?ÿçÿÌw¾ó|âŸÈi§–¶¶¶|èCÊŠ+*]â!å'À1aïÞ½Ù¼yóˆÇþS>z{{Ë!òúúúr°£¡¡!Ë–-˪U«ÊO<1Ó¦M«pW@mmmV®\™•+WæŽ;îÈ'>ñ‰¼ñoÌ«^õª|øÃ>jƒ!!ÀQ¡X,Žò(í»ºº200äÙ7ŠÌž=»øhkk+¯K†††ò¤A`|8ûì³söÙggõêÕù“?ù“r0ä£ýh^ÿú×Wº¼ƒJ 8âíÞ½;===cNöèîîÎÎ;Ëç …By’GkkkÚÛۇ훛›SSSSÁŽ€CéU¯zUþùŸÿ9wß}wþÏÿù?9ÿüósæ™gæÿïÿóÏ?¿Òå!p”)}zí®]»ò»¿û»ùæ7¿™§žz*ûöíöøÐÐИÏÝÿ±Ò}ûöíËÇ?þñ|ñ‹_̆ ÒÚÚšßýÝßÍ»ßýîUç¾}ûò¹Ï}._ýêWóðÃgpp0?ÿó?Ÿ|à™2eJ^ûÚ×fÞ¼yéèèȤI“†=·¿¿?‹/ÎúõësË-·äµ¯}íó^ïg€þÚ×¾–/~ñ‹Y³fMúúú²páÂ\xá…ùÈG>’3f¼¨þ€·gÏžlÙ²eÌÉ¥uI}}}9ØÑÐÐåË—›ìÑÜÜœ©S§V°#àHqæ™g榛nÊ}÷Ý—}ìcù•_ù•œyæ™ùÄ'>‘sÎ9§Òå½$UC£ý__€ ºêª«²jÕªJ—ãV)Àñæ7¿9×^{mùþÒ_l äÒK/ÍßþíߎxÎ7¿ùͬ\¹òÕ¸wïÞ\pÁ¹ùæ›G}|hh(mmm¹óÎ;ó©O}*øÀ†=þÙÏ~6—]vYÎ8ãŒüèG?: ëÖßÐÐPÞùÎwæë_ÿú¨Ï[¶lYî¼óÎLŸ>ýõ<ëÞ{ïM’,_¾¼Â••V,Ç yttt¤««+I’‰'fÖ¬YåÀG)ä±dþüùî¯îºë®|èCÊí·ßž_ûµ_ËŸþéŸæä“O®tY/Š !p”ºë®»rÓM7åœsÎ9(ŸŒû½ï}/7ÜpCÎ9çœlÛ¶-¿ó;¿“믿>ŸùÌg^p ä3ŸùLn¾ùæL:5ûØÇrá…fΜ9ù÷ÿ÷|ò“ŸL’üáþaÎ?ÿü|ò“ŸÌÿøÿ#S¦LIòìt?û³?K’|øÃ>àëæK_úR¾þõ¯§±±1ù—™×¼æ5™2eJÖ¬Y“÷½ï}ùñœ?ýÓ?-¿0R__ߨ!Ò¾««+Ï<óLù|¡P(;Z[[ÓÞÞ>,ðÑÜÜœššš vÍÎ:ë¬üÛ¿ý[nºé¦|øÃΩ§žšw¿ûÝù£?ú£q÷!&„GBà¥)MÁøÖ·¾•_ûµ_óñ:!äæ›oÎyçW¾ÿÉ'ŸLkkk¦M›–íÛ·¿ _ñŠWä¾ûîËUW]•ßøßóÜgœ‘{î¹'ög–}èCIþk:È)§œ’x UUU|½çöwÖYgåî»ïÎí·ßžW¿úÕÃÎvttdñâÅ9á„ò裾 þ€g™ãßž={²eË–!Òþ‰'žÈÖ­[ËçëëëGLòØÊGKKK9ì Piƒƒƒ¹öÚkó{¿÷{éííÍ?øÁüÞïý^êëë+]ÚŽ8!ðÒ”BO?ýt …˜¿Ð@H__ß°| ¥ººzÌk=ŸÉ“'§¯¯/›6mÊœ9sÆŸþô§sÒI'Uº¼1 „À1æøãOooo:;;³hÑ¢òý?øÁ[ 'tRÖ¬Y“ë®».ïyÏ{ÆþÜë}ík_ËÅ_œ$ù¿ÿ÷ÿæ#ùÈ‹ºÞhõ^vÙeùìg?û¼õú1*¼8÷Þ{o’dùò宎,}}}£†ò‘d„ #ž{(!ɳ!Ž+¯¼2_ýêWó裦¦¦&gœqF.¿üò¬X±bØÙ»ï¾;guV¦M›–îîîÌœ9óE]o¬zo½õÖüÍßüMV¯^M›6e„ immÍ/ÿò/ç’K.Éi§ö‚ûB86õ÷÷gýúõcNöxì±Ç²}ûöòùúúúQC¥ýÂ… S[[[ÁŽH’k®¹&ï}ï{3uêÔ\}õÕyõ«_]é’B€#@ð\—^zi¾üå/çø@>õ©OUºà „p4*‹£Nö(­Ÿ|òÉr¹®®.cNöX²dIf̘QáŽ8P½½½ùõ_ÿõü¿ÿ÷ÿòû¿ÿûùèG?ZþнJ¨Ü+€ÎÎÎÜtÓMIž †À¡R,G y” 
ÝÝÝÙ·o_ù|¡P(‡<–/_^}”‹-*O©`ü›7o^nºé¦\}õÕyï{ß›ï~÷»ùÚ×¾–O<±"õ„žÑí… -ÞÿšoxòtéÒ\$ÉîÝ»ÓÓÓ3"äQÚ?öØcÙ¾}{ù|¡P6Ù£­­mØ”¦¦¦Š~ú#•sÉ%—äçþçóÎw¾3Ë—/ÏUW]•·½ím‡½?ŽhõõõùÅ_üÅ|á _¨t)ÁŠÅâˆÇþûÞÞÞr8¹¾¾>óçÏ/>ÚÛÛ³jÕªòþ„NÈôéÓ+ÜG²“O>9«W¯Î‡>ô¡¼ãïÈ]wÝ•O}êS©­­=l5„Å ™üQÉk0þ‹Å1'{ttt¤»»;ûöí+Ÿ/ åIÏìÑÐІ††žpc©­­Í_üÅ_äÜsÏÍ»Þõ®Üyçù§ú§477–× bvïÞžžž!ÒúÑGÍŽ;Êç …By’GkkkÚÛۇ훚š2a‚ÿÀáó«¿ú«¹óÎ;óæ7¿9gžyf¾õ­oå~áùëú)‡ÄÞ½{³yóæ!çNù(©¯¯/;²lÙ²¬Zµªø8ñÄ3mÚ´ v£[ºti~ô£åï|gÚÛÛsõÕWç¿ý·ÿvH_S €¥X,ŽòèèèHWWW’$µµµ™={v9ðÑÖÖV^— ©ªªªpWðâL›6-ßúÖ·rùå—碋.ÊG?úÑüÑýÑ!{=Fؽ{wzzzÆœìÑÝÝ;w–Ï …ò$ÖÖÖ´··Û777§¦¦¦‚À¡WSS“+¯¼2---¹âŠ+²eË–|æ3ŸIuuõA-€cÌž={²eË–1'{”Ö%õõõå`GCCC–/_>l²Gsss¦NZÁŽàÈòþ÷¿?ÍÍÍyÇ;Þ‘íÛ·çË_þòAÿÀ€£L±X3äÑÑÑ‘®®® $I&Nœ˜Y³f9Ù£´^˜ /¼0ßýîwsÁ¤¿¿?ÿ÷Ÿ ^ŒC `éëë5äQÚwuuå™gž)Ÿ/ å`ÇhæææƒþÉ„À³Î=÷ÜÜtÓMù•_ù•¼ímoË׿þõÔÖÖ”k „!öìÙ“-[¶Œy”öO<ñD¶nÝZ>___?l’ÇòåËÓÚÚZÞ·´´dÊ”)ì8çœsrà 7ä‚ .ÈÅ_œ¯ýë©®®~É×8LŠÅ☓=6lØÎÎÎ &I&Nœ˜Y³f•ííí¹øâ‹ËûÅ‹gæÌ™î8¿ø‹¿˜ë¯¿>çŸ~®¸âŠüÅ_üÅK¾¦@ÀAÐ××7ædžžžtvvf×®]åó…B¡<ÉcÙ²eY±bEy?þü´´´”O Ž ¿ôK¿”«¯¾:oûÛ³pá¼ÿýïI×øúûû³~ýú!ÒúñÇ϶mÛÊçëëëË““&MJuuuf̘‘$™:ujjkk3eÊ”Lž<9Ó¦MËôéÓ3yòäLž<9…B!“'OΔ)SÊÕÔÔ¼€_U8pÅbqÔG)ÒÝÝ}ûö•Ï …ò$åË——'{”‹-*ÿ™^÷º×åòË/ÏW\‘7¼á ihh8àçV •>.àqÕUWeÕªU•.†††²yóæ<ýôÓ)‹)‹ÃÖÏÝ—Ö;vìÈÎ;Ÿ÷ÚÓ¦M+#JÁ‹R8¢´¨ªªÊÌ™3“üWТ¶¶6S§N-_§®®.“'Oóuž/pQ ™ŒfïÞ½ÃzسgOžyæ™$ϾÉ>I9¸²oß¾ìØ±#ɳ!˜ÁÁÁìܹ3»víÊÎ;Ë÷¥®®.Ó¦MK¡PH¡PÈqÇW^µ?î¸ã2gΜLœ8qÌërtÛ½{wn¾ùælÞ¼9Ó§O1åãÑG-ÿ¾Lž {”‚û‡óæÍËœ9s2wîÜòúøãOccc&MšTÎy±ŠÅâˆÇþûÞÞÞ”>›®¾¾>óçÏ/‡<ÚÛÛ³jÕªòþÄO,O¸8ÖLž<9_øÂòÚ×¾6ßùÎwò¦7½é€žgBpÄ1!Ž^ƒƒƒéííÍÚµk³nݺ¬[·.ÝÝÝåuWWWz{{‡MŘ4iRæÎ›†††Ì™3'sæÌ)¯K÷—&R …L™2¥‚ÛöìÙS‡<õÔSÙ¼ysz{{³iÓ¦òzÿOi’IÉìÙ³ÓØØ˜¦¦¦,\¸0 ,È‚ ÒÔÔT^×ÕÕU¨»cK±X5äQÚwww c …ò$ý'{ô÷÷göìÙùå_þåTUUU°#€#ß%—\’Ûo¿=<òÈý<\ 8â„Àø¶sçÎ<ñÄ£ÞÖ­[—½{÷&Iªªª2oÞ¼òÿ.\8,Pš&1uêÔ wÄ¡²gÏžlÚ´)7nLOOϰpPi½~ýúô÷÷—Ÿ3oÞ¼,Z´(‹/q;þøã+ØÍóûÞ÷¾—üãyûÛßž÷¼ç=ÿŸ½;«²Îÿÿÿ}tçw*&&FwÞy§œUYY©U«VéÅ_Tnn®êêêÔœ¯ÞåííÝèÊõÛ×!ðó :TNNNÚ¸qc£}_;´kÇŽÓ¦M›´k×.íÞ½['Ož”‡‡‡¢££5}út 8ð²+×???ùùù)66ÖÜväÈ%''k÷îÝz÷ÝwõÏþS...ºé¦›tøða•••]ñqvìØ¡;ï¼³%K\c¦Nª|PÇW`` Õ>B¸FÔÖÖê›o¾ÑúõëµaÃ8p@8p fÍš¥Êd2ÉÁ¯€æ SXX˜¦N*é§•v’““µvíZýðÃæ~ƒAvvvª©©irƒAÇ¿ª5Ú¿¸¸8yxxhíÚµzúé§­öáÚ±ššmÛ¶M|ð6nܨ'N($$D±±±š?¾ $'''[— \3BCCªI“&I’8 
„„}ôÑGÊÌÌ”Á`P]]$ÉÑÑQÕÕÕæ×õmB—ãä䤘˜mݺµÑ@ˆ]+×Z@FF†æÌ™£ >\ÐSO=¥ýû÷ëèÑ£úÏþ£!C†®²Þ½{ë_ÿú—ÒÓÓ•ŸŸ¯%K–èþûï—“““êêêÔµkWuèÐA’äàà ªª*!€f6l˜víÚ¥ .X}ŸBh'*++µfͽõÖ[JNN–¯¯¯yäMš4Iaaa¶.¸îùøøhòäÉš]_ýµ8 ±cÇꫯ¾Ò† ´sçNeddغD@çïï¯=zhïÞ½Vß'@U]]­7ÞxC={öÔK/½¤qãÆ)##C~ø¡î¿ÿ~ÙÛÛÛºDÍЫW/-X°@ÙÙÙZ¸p¡vìØ¡ððp͘1C'Nœ°uy€6,22RiiiVß#@´ÿ~õïß_O=õ”F¥üQóçÏW@@@«ß`0˜mEKÖÓZs³ÕùKMMÕàÁƒåááÑæ®ã¥.þ¬ ¹¸¸( @±±±Z¾|¹*++›‘‘¡iÓ¦)$$D:t§§§ú÷ï¯^xAßÿ½¹_]]tÇwÈËËK...ºå–[ôüóÏëàÁƒ­1MI’³³³üq>|Xï¾û®>ùäÝxãŠW]]]«Õh?"##•ššjõ=!´1óçÏWTT”ÜÜÜ”ššª7ß|S>>>­ZCýÍémé&õ–¬¥-ÍëjøÍo~£éÓ§ëôéÓÚ¶m›­ËiÒÅŸµºº:9sF{öìÑ„ ôÞ{ïÉd2éÇl0nëÖ­úÍo~£_ýêWúþûïuöìY}ûí·z衇´xñbÝrË-æ¾/½ô’V¯^­øøxåææ*??_ÿþ÷¿µyófõîÝ»ÕæZÏÎÎNcÇŽUjjª¦L™¢'žxB£FÒÙ³g[½@Û¡#GŽXý%Cݵþ€v'>>^Ó¦M³u@««­­Õ”)S” üãúýïoÓ• C› N´dM­1?[CGGG•——ËÞÞ¾Õýs4užâããõòË/ë»ï¾S§N$I999=z´vîÜ)z^I[ IDATc’’’4dÈó>}||tàÀuíÚÕ¢_ZZš"##mþ9ÿâ‹/4vìXuìØQIIIò÷÷·i=-)%%E’d2™l\ ´O_}õ•î¸ã;vLÁÁÁï±BmÄŒ3´råJmذA³fͲiäJgÆ kpÎêÇeddèW¿ú•ŒFc£u^<¦ººZ2 úÃþÐì}5ç:Ö9tèî¸ã¹ººê®»îRzzºRSSÕ¯_?¹»»ëž{î¹¢kÖ˜iÓ¦ièСzå•WÌm ,ÐôéÓ­†A$)&&Æ"äQVVf“\,""ÂæaI0`€öíÛ';;;ÅÄĨ¨¨ÈÖ%ÚˆŽ;J’JJJ¼G €6àÃ?Ô›o¾©•+Wjøðá¶.犤§§køðáúŸÿùedd˜Æ SzzºE߸¸8=úè£:s挾üòK9sFO>ù¤EŸŒŒ óþ233•‘‘¡_ÿú×zàšUÏ‘#G4~üxÍž=[§NÒ—_~©óçÏkذaMŽk,`­ýÑG•§§§öîÝ«³gÏê“O>Qzzºúõë×`\]]ù±téRI?³!C†(66V™™™:vì˜&L˜ ¸¸8åää4ØÇôéÓ5kÖ,åååé“O>iÖ<êùü£Yûjîu¬ßÏܹsµlÙ2åççë–[nÑ„ ô§?ýIË—/Wnn®ÂÃÃõûßÿ¾ÉZ›kêÔ©úøãͯ·lÙ¢Áƒ7{|ll¬þð‡?´é …¯¯¯¶oß®sçÎiæÌ™¶.ÐFÔÿà‘µ@ˆ¡®-üì\$>>^Ó¦M³u@«©««SDD„úõë§åË—Ûº3ƒÁЬÕ&L˜ Ûn»MO?ý´Eû‚ ôí·ß*11±Ñ±EEEêÑ£‡ Ím'NTTTTƒý-_¾\“&MºlM=ô†®‰'šÛ>¬›nºÉb¬µù56çKÛ=<<”••¥Î;›ÛŽ=ªÐÐÐËCúéœÝ|óÍš={¶EûÛo¿­ýû÷káÂ…ûعs§î¾ûî&çÝÜy4¶¯+¹ŽƒA»víÒ Aƒ$Iyyyò÷÷·hËÉÉ‘ÉdÒ‰'~v½õΟ?/•––J’\]]U\\,''§Ëî[úi…^xA êׯŸFŽ©¸¸8ùûû7k|kÚ°aƒF¥¨W¯^¶.çKII‘$™L&WíSii©<<<´iÓ&1Ââ=VÀÆ8 ~øAO=õ”­KùY’’’4zôèícÆŒQRRR“cF£Îœ9cѶmÛ6«û»Ü õvìØ¡{ï½×¢íÆolV¸¥¹ú÷ï¯Q£FiË–-ª®®–$Ýpà Í>FRR’Æß ýþûï×¶mÛ´ß~ûí¿¬àfìëJ¯cß¾}ÍÛݺukÐæçç§S§NýÒr%Y_¥åJ®§«««^}õUeffê‘GÑ—_~©[o½U?ü°Îž=Û"5¶”‘#G*44Tï¿ÿ¾­K´nnn’~úñ¤KÀÆŽ;&IŠŒŒ´m!?ÓéÓ§Í€‹ùúúêôéÓæ×§NÒ”)S   †fïÏZ[cõx{{_Á ®Üû￯Ûo¿]3fÌPçÎ5pà@-\¸PUUUÍ_XX¨ó9¨tëÖÍüy¸˜««k‹ÕÞØ¾š{ëyxx˜·íì쬶µT'--MÝ»w7¿ Rnnîï§S§Nzà” cÇŽ©¼¼¼Í± ƒ"##uôèQ[—hê¿K±ö7w!ؘ———$éäÉ“6®äçñööV~~~ƒöüü|‹`ÆÄ‰åææ¦Ï>ûLåå媫«³úå…··· 
´6«///«†æ0 ª¨¨°h»té§`Á‚ ”‘‘¡ÌÌL͘1C«V­Òƒ>جãx{{ëÌ™3æspñ£¬¬ìgÕþK5÷:Ú›o¾i±zɰa쮤r%ÜÜÜôꫯjݺu¿´¼wâÄ óÿ 4†@6Ö§OyzzjÕªU¶.¥Ù.^Ù#&&Fk×®mÐç£>RLLŒùõ_|¡—^zI!!!rpp$«á‡¡C‡Z½Iûöíͪíî»ïVRR’EÛþýûqٱݺuSvv¶EÛîÝ»ô3 ÊÉÉ‘ôSbüøñÚ´i“>ýôÓý¬¹ï¾û´k×®í{öìQß¾}/[çÕÐÜëØÚ^ýumß¾]¿ûÝïÌm¿ûÝï´xñb[³víZ <ØüÚ`0X]yÅÁÁAîîî-^ó/‘žž®}ûöYÔ€5¶.€ë«««žxâ ýíoÓ¸qãl뒮ȟþô'ÝsÏ=êØ±£F%ƒÁ õë×kÁ‚¡‡höìÙzþùçåçç§ììlýå/i°¿¹sçêî»ï–‡‡‡bcceoo¯¤¤$ýýïoV=/¾ø¢ÆŒ#???õïß_zì±Çôä“O^vì!CôüóÏëÕW_•§§§>ÿüs½ñÆVûN™2EóçÏW=T\\¬… 6NøûûkïÞ½ŠŠŠÒÎ;5yòd?~\sçÎÕ¨Q£TSS£{î¹GNNNJNNÖoû[-^¼¸YóliͽŽW[yy¹ ôå—_jéÒ¥*((жmÛÔ©S'sŸ   ýýïWLLŒæÌ™£AƒÉÅÅEÇ×êÕ«õúë¯kóæÍû8q¢æÍ›§^½zÉÞÞ^‡ÒsÏ=§Ç¼Õæv9µµµúío«›nºI#FŒ°u9€6ŽBhæÌ™£àà` >\'Ož´u9æ•- ƒÕÇÅzöì©Í›7ëÃ?Thh¨n¸á}ðÁÚ¼y³zôèaî— òòrõë×O3fŒî¿ÿ~‹ãIRhh¨6oÞ¬>ø@7Üpƒ‚ƒƒõÖ[oé½÷ÞkÐך޽{+11QsçΕ···FŽ©±cÇjúôéVçw±ùóçËÉÉI}ûöU·nÝ´xñb-Y²¤Aߤ¤$¹¸¸hРAêØ±£ú÷ï¯òòr½óÎ;û[°`~øa¹»»kÆŒæ°GHHˆ>üðC­ZµJÝ»w—¯¯¯þüç?kÑ¢EŠ‹‹kô:4Gcc.·¯æ^Gk箹mÍ©×ÓÓSÑÑÑZ±b…~øa¥¤¤¨gÏž Æ9R Z³fn¼ñFuêÔIC† QVV–’““Õ»wosß½{÷*<<\<òˆ¼½½åíí­©S§jôèÑš3gN“õµ–ºº:MŸ>]Ÿ}ö™doooë’mœ¡®®®ÎÖEÀÅâãã5mÚ4[—´º'Nèî»ïVUU•>úè#‹Ú\»JKKõØciݺuúàƒ4jÔ([—ÔbRRR$I&“ÉÆ•@ûe0´zõj7΢Bh#|||´k×.ùûû«ÿþZ¸p¡jkkm]€«è³Ï>STT”víÚ¥O?ýôš ƒ®.!´!>>>JJJÒ¬Y³4{ölÝqÇÚ½{·­ËÐÂrss5eÊ 4HÝ»w×ÿýßÿéî»ï¶uY€v„@mŒ£££æÎ«””yzzjРA9r¤¾øâ [—àÊËËÓ¬Y³¦¤¤$­X±B›6mR@@€­K´3Bh£zõê¥O?ýT[·nUQQ‘¢££5hÐ ­ZµJååå¶.Àøâ‹/ôØc)44T+W®Ô_ÿúW>|X>ø ­K´SBhㆪÏ?ÿ\ÉÉÉ2š8q¢üüü4cÆ íÛ·ÏÖåhD~~¾æÍ›§ððpEGGkß¾}Z´h‘233õÌ3ϨC‡¶.ÐŽ9غÐ<ÔÀUPP ÄÄD-[¶L¯½öšz÷î­ &(..N7Þx£­Ë®k………úä“O´fÍmÙ²EzðÁ•˜˜¨¨¨([—¸†°BíL·nÝôì³Ï*--MŸþ¹úõë§yóæé¦›nRXX˜fÍš¥äädÕÔÔØºTàºpäȽòÊ+4h|||4eÊUWW+11QyyyZ¼x1a@‹c…Ú±hÀ€zã7ôÅ_hÆ Z¿~½æÏŸ¯Î;køð኉‰ÑÀjërkBqq±öìÙ£]»viÓ¦M:|ø°:wî¬#FhæÌ™ºï¾ûÔ±cG[— ¸Æà`oo¯»îºKwÝu—æÍ›§#GŽhýúõúä“O4}út•——+00Pƒ Ò Aƒ4pà@………Ùºl ](,,4@vïÞ­ýû÷«®®NŠÕ›o¾©;ï¼Sööö¶.p!À5(,,L³fÍÒ¬Y³TQQ¡¯¾úJÉÉÉJNNÖÓO?­²²2ùúú*::Z·Ýv›¢¢¢Ô·o_yzzÚºtÀ¦ªªªtàÀíÛ·OûöíÓÞ½{•šš*ƒÁ Þ½{kРAzñÅ5pà@y{{Ûº\ÀuŒ@׸:hàÀ8p æÌ™£ªª*}ýõ×Ú½{·öîÝ«… *//OƒA=zôPTT”¢¢¢d2™Ô·o_yxxØz ÀUQ]]­´´4sø#%%Eû÷ïWEE…ÜÝÝÕ§OÝwß}úÛßþ¦»îºKF£ÑÖ%`F €ëŒ£££¢££mnËËËSJJŠùñ¯ýK’$___EFF*""Âü|ë­·ÊÝÝÝVS®HMM²²²”ššª´´4óó¡C‡TVV&GGGõìÙS&“I=ôî¼óNÝzë­²···ué4Š@ŸŸŸüüükn;~ü¸RRR”––¦ƒj÷îÝzóÍ7UQQ!ƒÁ 
EFF*22R={öT=Ô½{wùûûË`0Øp6¸^)##CJOOWjjª:¤C‡5øÜÆÄÄè©§žÒ­·Þªˆˆ98ðµ }á/ÛÀªÀÀ@jôèÑæ¶ššeddèàÁƒ:tè<¨-[¶hÑ¢E*++“$9;;+44Ô¹ø¤:ØjJhçjkkUPP`}Ô?ê·Ïœ9#IrppPPPÂÃÃ5tèPýïÿþ¯"##.777Ï€–A 4›½½½ÂÂÂÖར¢"effZ<8 ?þX™™™æ~F£Q¾¾¾òóóShhhƒíàà`¹»»·æ´ÐF)//OùùùÊÌÌl°}ìØ1sðÈÉÉI UŸ>}ôë_ÿZ¡¡¡ UDD„\\\l<®.! EF™L&™L¦ï+##CÇWvv¶rrr”››«´´4mݺUyyyªªª2÷ïÖ­›ºté"___uíÚU]»vU·nÝäããcÑÞ¥K9::¶æ4q…NŸ>­“'OêÔ©SÊËËÓ©S§tòäIåçç›ÛóóóUPP ÊÊJó¸nݺÉßß_ ×!Cäïï¯àà`Ýpà ò÷÷·á¬°=!àªóôôl4,"Iuuu*((ÐñãÇ•““£ãÇ›ƒ'OžÔ?þhÞ.//·ëíí-///Fó£sçί/}ÏÝÝ]:uj©_ÊÊÊTZZª¢¢"óãÌ™3¯/}ïÌ™3:uê”EÐÇÞÞ^]ºt1|ºvíª°°0uëÖM¾¾¾ R@@€üýýÕ¡C΀¶@°9ƒÁ ___ùúúêöÛo·Ú'55UK–,QBB‚JJJ4`ÀM™2E¥¥¥æpBýsNNŽE@áüùóV÷éîî.WWWs@ÄÕÕU®®®òôô”›››\]]åáᡎ;ÊÞÞ^nnnrrr’ƒƒƒ<<<$I:u’\\\äìì,;;»a£ÑØèÜëÇ[sîÜ9UWW[}¯´´Ô"lqá•——«¦¦F%%%ã+**TVV¦ºº:K’JJJTVV¦²²2™·KJJtîÜ9?Þüž5ŽŽŽVƒ7æàÍ¥«¹tíÚUƒ¡Ñsš@h³ÊË˵aÃÅÇÇkûöíò÷÷×´iÓôÄO(((¨Ùû©¬¬´Œ”––êìÙ³*--5¯~qöìYsâìÙ³:qâ„Ο?¯óçÏ«¸¸Xuuu í…£££ÜÝÝ%ý¿Š‡‡‡\]]åææ&OOOyyy)((H;v´xÏh4šC3?ê÷lƒ@hsRSS•˜˜¨%K–¨´´Tqqqúøã5bÄÙÛÛ_ñþœœœäãã#Ÿ¯µ~óçÏ«²²RÕÕÕ:wîœùýªª*•––Z{ñjÖ8;;ËÅÅÅê{:t««kƒ×ƒAžžž’d^Ñ\{„€6¡¤¤D«V­R||¼RRR¦Ù³gkÒ¤IêÚµ«­Ëk”Ñh´xh B€M¥¤¤(>>^ï½÷žªªª4jÔ(ýãÿн÷Þ+ƒÁ`ëòÚ$! ÕkÍš5zíµ×´ÿ~…‡‡ëÅ_Ô”)SäååeëòÚ<! 
ÕÔ¯²bÅ ÕÕÕiäÈ‘zå•WcëÒÚ!àª:qâ„V­Z¥%K–(55U&“I¯¾úª|ðAyxxغ<€v‰@hqµµµÚ±c‡âããµnÝ:¹ººjüøñJLLTŸ>}l]@»G ´˜¼¼<%&&êÍ7ßÔÑ£Ge2™ôßÿþW?ü°ÜÜÜl]À5ƒ@øE.^ ä£>’‡‡‡ÆŽ«™3gªwïÞ¶.àšD ü,999z÷ÝwõÚk¯)''Gýû÷×âÅ‹5qâD¹¸¸Øº<€kÐl•••úøã• Í›7«K—.zôÑG5uêTuïÞÝÖå\7„€Ë:räˆÞ~ûm-[¶L§OŸÖàÁƒµråJ=ZŽŽŽ¶.àºC XUQQ¡õë×+>>^Û·o—ŸŸŸ&Mš¤ßþö· ±uy×5!À¡C‡ôÎ;ïhéÒ¥*..Ö=÷Ü£Õ«Wk̘1rpà«€¶€om€.\¸ 7*>>^IIIêÙ³§fΜ©É“'+00ÐÖåàB¸Ž¥¤¤(>>^+W®Tee¥F¥mÛ¶éÞ{ï•Á`°uyh®3gÏžÕêÕ«õÆoèÛo¿ÕM7ݤ^xA=ö˜ºtébëòÐ B¸NÔ¯òî»ïª¦¦F±±±š7o«´CB¸†kÍš5Z¼x±¾ÿþ{EDDhΜ9š:uª:wîlëòð3àSWW§Ï?ÿ\‰‰‰JLL”£££xà-^¼XwÞy§­Ë@ À5¢  @ï¼óŽ–,Y¢ŒŒ ™L&ýûßÿÖC=$www[—€D €v¬¶¶V;vìP||¼Ö­['WWW?^|ðn½õV[—€«„@íPnn®V¬X¡7ÞxCÇŽ“ÉdÒÿû_M˜0A®®®¶.WÚ‰ššíܹSñññúè£äáᡱcÇêÉ'ŸT¯^½l]ZÚ¸ôôt­X±Bo¿ý¶rss5xð`½÷Þ{Š‹‹““““­Ë€   ª¨¨Ðúõë¯íÛ·«[·nzä‘G4mÚ4…††Úº<ØÚÇkÙ²ezûí·UXX¨ÁƒkõêÕ3fŒø³>~Â7GØXyy¹6lØ`^ Äßß_=ö˜¦OŸ®àà`[—€6ˆ@6’––¦„„-]ºTçÎÓСCµzõjýêW¿’½½½­Ë@F €VtîÜ9­\¹R úüóϦgŸ}V“&MR×®]m]Ú !´‚””ÅÇÇë½÷ÞSUU•F¥mÛ¶éÞ{ï•Á`°uyhg„p•œ={V«W¯Ö믿®ï¾ûNááázñÅ5yòdy{{Ûº<´cBhaõ«¬X±BµµµŠÕ¿þõ/ÅÄÄØº4\#„ÐŠŠŠôþûïkÑ¢E:xð L&“þþ÷¿ë‘G‘Ñh´uy¸Æàgª­­ÕŽ;¯?þXÎÎÎzàôÎ;ï¨oß¾¶.×0!\¡üü|%$$(>>^™™™2™LZ´h‘~øa¹¹¹Ùº<\„Ð ¯²nÝ:¹¹¹iܸqš1c†n¾ùf[—€ë š››«+Vèõ×_WVV–L&“þûßÿjâĉrqq±uy¸Nà•••Úºu«µvíZuéÒE>ú¨¦L™¢=zغ<€@õ~üñG½õÖ[Z¶l™NŸ>­ÁƒkåÊ•=z´m]`F p]«¨¨Ðúõë¯íÛ·Ë××W“&MÒã?®n¸ÁÖåV\—~øá-_¾\o½õ–ŠŠŠtÏ=÷hõêÕ3fŒøó9Ú6¾Ñ\7ÊË˵aÃÅÇÇ+))Iš¬šš‹÷ªªªôŸÿüGþóŸÕ¯_?ýç?ÿÑøñãåîîn£j€ÖE Ú ªª*ååå)''G999ÊÍ͵|deeéĉª®®6éÖ­›üýý|èqY IDATåçç§ììlmܸQyyyª¬¬4÷ñöö–ŸŸŸ‚‚‚Ì‘   ùùù) @m"Tñý÷ßë¾ûîSaa¡ªªª¬ö)..Ö¢E‹4sæÌV®°=!ÐÊ***TXX¨üü|eff*//¯ÁvVV–ŪF£Q¡¡¡òõõU¯^½4dÈùùù™ÛBBBäææfõxEEEV‘‘‘¡={ö(''G%%%æþÎÎÎòó󓯯¯Å1.Þöõõ•Á`¸*ççÓO?Õ˜1cTYYix¹”ƒƒƒ¶lÙB ×%!ЂÊËË•——g5€Qÿ|ìØ1ÕÖÖJ’Í«vøúú*22R±±±Œ   98üü?çFFEFF6ÚçÂ… T’’’”‘‘¡ââbsÿ¦B#õÏ!!!²³³»¢Z—.]ªÇ\’Ìç¨1ÕÕÕÚ²e‹rssåïïEÇÚ;!ÐLÖB—†' TWW'Irrr’———90a2™„'‚ƒƒeoooã™I... 
Uhhh£}ššRRÒeço-ìQ$0™Læm???uïÞ]žžž6œÑµ¯C‡òóó“ŸŸŸL&“Õ>UUU:uê”Õkš››«={ö(;;[ÕÕÕæ1F£Ñ|/¾¦õÛêØ±ckM¸ª„h“š Ôo_.M  rtt¼lhDj:ôÙgŸ]6té6 ´B´ºŠŠ Z yÔogee©¦¦Æ<Æh4šoÜ·ö ‘››› g[02™L— 4*JIIQNNŽJJJÌýëC#M­6âëë+ƒÁÐS¬" E•——+//Ïê øõÏÇŽSmm­¤ŸV‚ðöö6߀©ØØX‹ðƒ‚‚äàÀŸ3ñóFFEFF6ÚçÂ… ”’’’”‘‘¡ââbsÿ¦B#õÏ!!!²³³k)à:Ä7¨šÍÚMó—Þ<_PP ºº:I’“““¼¼¼Ì7Ì›L¦7ÏËÞÞÞÆ3ÃõÎÅÅE¡¡¡ m´OýçßZØ)))鲟ká>ÿø¹„Ôô ùùù—]!!&&†pMkNh¤©rRRR´qãÆ&Wȱa…XÃ7HÀu ¨¨Èê êõÛ999*))1÷¯{Ôߘ©‰'Z´ùúúÊ`0ØpV@Ûãìì|ÙÐHee¥NŸ>mõßcjjª’’’”••¥ššó£Ñh¹4<"77·Ö˜"Ú!@;WTTdqSù¥a¬¬,?ÞÜÿÒ°‡Éd²¸Ñ¼{÷îòôô´áŒ€k›“““üüüäçç'“ÉdµOUU•N:eõßtff¦’’’”­êêjó£ÑØè*#¾¾¾ TÇŽ[kš¸Ê„mTS7„×o_î†ðèèhnÚ!GGLjF¤¦aŸ}öÙea—nh?„6PQQ¡ÂÂB«!úí¬¬,ÕÔÔ˜ÇFóÛÖÂ!!!rss³á¬ðK 2DÛ¶m³uhGŒF£L&ÓeC#…ÊRRR”““£’’sÿúÐHS«øúúÊ`0´ÆÐ!@ +//W^^žÕ°ëŸ;¦ÚÚZI?­àíím¾;22R±±±7`ÉÁáêÿ9ïâ¼ëêê®úñZJ}Ýí©fkê?WÊ`0´é¹·µësiÁÙÙY^^^êÓ§~ýë_롇’“““Õ±úç?ÿ©O?ýTùùùrqqQxx¸¬ñãÇëæ›o–ôÓ\õÚk¯éÇTYY™ÂÂÂtÿý÷롇R¯^½$I;vìТE‹”œœ¬ššõìÙS3gÎÔ£>Úb £Ñ(£Ñ¨ÈÈÈFû\¸p¡Ñ€ZRR’222T\\lqÎ Ô?‡„„ÈÎήE怆 umå8øÿÅÇÇkÚ´i¶.«¬Ý4}éÍÓæß;tè Î;7ú+û~~~ –½½½göÿ´õpAcÚkÝ-¡=̽­Õxq=.\PAA¾þúk½õÖ[ÊÏÏ×ÚµkÕ³gO‹1[·nÕ_ÿúW½ð 0`€œœœ”ŸŸ¯7jΜ9:{ö¬yŸsçÎÕ7ß|£—_~Yaaa*//×·ß~«ßýîwúî»ïÌý ƒn¾ùf-]ºT½zõÒ?þ¨3fhèСš3gN랔˨ÿÿ¯±ÕF.ýÿÏÉÉI^^^íêÿ?-/%%E’š\ÉÐ4ƒÁ Õ«Wkܸq–íB´5B¶ÒÔ/äççç_7¿ßÖnÜo®öZwKhsok56UO||¼^~ùe}÷ÝwêÔ©“$)''G£GÖÎ;åááÑ`LRR’† bÞ§8 ®]»ZôKKKSdd¤E dÿþýæ•E¤ŸV!‰ŽŽVAAA‹Ìµ5ýÒ’¬…Gåèèhã™ø¹„À/×X ÄÁFõ­ª¨¨¨É_µÏÉÉQII‰¹}Ø£þÆäÈÈHMœ8Ñ¢Í××WƒÁ†³jgÏžÕܹsõñÇ+//OF£Q÷ÝwŸžxâ Ý~û펫?7ÙÙÙš9s¦vìØ!WWW 2D .”———EÿƒjöìÙÚ³g$é®»îÒ¼yóÔ«W/‹~©©©zöÙgµ{÷nÙÛÛkðàÁZ¸p¡Õ<¨çž{N»wï–$EGGë•W^i°ÏÆjOOO׳Ï>«;wšÃ@õ7ò7wß×kgg§þýûëÕW_Udd¤ÅþêyqH¡9ç¾~ÜÅŸÅÉ“'kéҥͮójÍ÷r×Çšæ|®ô³u¥¦M›¦””½òÊ+úË_þ"IZ°`¦OŸn5 "I111×®¬¬Ì&¹XDD„E?k¡///UTTü¢9ØŠ³³³BCCÚhŸÊÊJ>}ÚêÿÇ©©©JJJRVV–jjjÌcŒF£ÕÐ]}[HHˆÜÜÜZcŠm´{EEEV…¾¾-++KçÏŸ7÷¿4ìa2™,n4îÞ½»<==m8£¶åÑGUŸ>}´wï^yzz*--MO>ù¤úõë×äŠuuu2 š2eŠžyæ½ûî»***ÒÓO?­Y³fiÙ²eæ¾ééé>|¸^zé%-_¾\’´~ýz 6L»víR=$ý´rÂðáÃ5wî\-_¾\vvvÚ²e‹xàÇOOO×!Cô§?ýÉÜwóæÍŠ‹‹Srr².[ûôéÓ5wî\½ûî»Úµk—FŒqEû¾´^{{{mÛ¶MãÇ7çÒc^鹯gíZ4·Î«5ߦ®5Íý\Égëçš:uª{ì1s dË–-zòÉ'›=>66VøÃôÇ?þQF£ñŠŽý—¿üEÑÑÑW4¦=qrr’ŸŸŸüüü]1 ªªJ§N²úzff¦’’’”­êêjó£ÑØè*#¾¾¾ TÇŽ[kšW¡®©olÀâãã5mÚ4[—hšº!¸~›‚[Þ¥áeee©sçÎæ¶£G*44´É@Hý¾Ö­[§¸¸8sÛáÇ5xð`åææšÛ&L˜ 
Ûn»MO?ý´Åø èÛo¿Ubb¢$iâÄ‰ŠŠŠjÐoùòåš4i’E=&LÐÍ7߬ٳg[ô}ûí·µÿþË®Za0´sçNÝ}÷Ý ÞkwÕªUzðÁœ¿Ÿ{î „\É9¸šóµv}¬iîç ¾Þæ|¶ÓØ9«wþüyùøø¨´´T’äêêªââb999]vßÒO+„¼ð JHHP¿~ý4räHÅÅÅÉßßßjÿììl%''kÑ¢E:zô¨¾üòKuïÞ½YǺžýÒ@à¥Û–—’’"IÀ—g0´zõj7β@€¶†@\***TXXh5äQ¿••¥ššó£Ñhqãî¥7󆄄ÈÍÍ͆³º6\z£üСCUVV¦_|Q111rphþÂÃAgΜ±X!¡¢¢B...ª­­5·uëÖM_}õ•‚ƒƒ-Æ=zT P~~~“ý þ?öî<¬Ê:ÿÿøë°¯âQFޏ&š–Û¨é@:šk–-j6£VZù­l™šùZ6Ö43YMY?MrfÊ-“Æ\Qs̰rÅrdpA@ÜðüþàËÎð|\×¹Î9÷ù|îûý¹¹…ë’ûÅ[ÁÁÁvuW6öøñã8p j`õ@ýF n\eƒêp“Ðܹs•œœ¬””MŸ>]K—.Õú裫&ç>44T êÑ£‡¶nݪ_ÿú×ÊÈȨµsp½ë­êëãHM¯ƒÚvñâEåäähçÎZ¸p¡rrr´iÓ&ØÆ„‡‡ëõ×_WLLŒ~÷»ßiÀ€òööVFF†–-[¦?üPëÖ­³Ûï„ ôÇ?þQ;w–«««~üñG½ð zôÑGíÆõíÛW&“Iß|óM­±¡ÊË˳ y\ý³"--Mçγ/{”ÿLˆŽŽ¶ûYѦM‡€úÈdåÏŽ¨g,X ©S§]à&r£7_ýº1ß|å_¼/ÿ¯ÃÍ›7ëý÷ß×öíÛUXX¨ÐÐP5J¯¾úªüýý¯i_Umß¿¿žþymß¾]’Ô¯_?ýñT—.]ìö›””¤çž{Nÿú׿d2™Ô§O½ýöÛŠŠŠª°ÏÇë7¿ùâããURR¢N:é7¿ùM…NUÕ~õ>¯ußWÖëââ¢èwÞQûöíí:àÜȹ_±b…^xá;vLááázë­·4bĈ×YWë­îëãHM®ƒk½¶®võz===Õ¼ysuïÞ]÷Üs|ðAyzz:œ{ðàAÍ™3Gñññ*((Ppp°bccõ /¨M›6¶q;wîÔâÅ‹µuëV¥§§ËÕÕU;vÔ¯~õ+MŸ>Ý®†Þ½{ËÅÅE;vì¨òÜ4&UË_n‰‰‰’TiP=“ɤeË–é¾ûî³ßN @}C Pòòò*½©8++K™™™:sæŒm¼£ÐÈÕ7W¸±¨‰¤¤$ 6L©©©F—8Ý¥K—têÔ©J¿ggg+--Í.0e6›«ü~l±XäëëkàªT†@ܸÊ!nÕ8•Ùl–Ùl¶u)päÂ… •Þ |ðàA%''+??ß6¾<4âè/Ò—?[,¹¸¸8c‰¨§L&“ÞyçMœ8Q>>>úñÇõÄOhúôéF—Ôº‹/*++ËaÈ£ü955U—/_–$¹»»+00Ðö}3**JÇ·û¾&wwwƒWPÿþ···"##YéG¡‘òçøøxeee)''Gåy===Õ¬Y³ ‘+_GDDÈÕÕÕYË„“­Y³Fo¿ý¶~ûÛßÊÅÅEíÚµÓ“O>©‡~ØèÒ€kRþý¯ªnKW~ÿóððPóæÍmßó¢££ùþP‹„× &¡‘ªþB~bb¢Ö¬YSå_Èw —›ÿw36l˜† ft@•ªê”]m‡¤˜˜:$8¿Aj™——Wµ¡‘¢¢"åææ:¼;))IñññJKKSii©mŽÙl¶ Š\±X,òõõuÆÜDòòòªìê‘™™©3gÎØÆ; {L˜0ÁîûMpp°L&“«Àžžž QHHˆ¢££Ž)..ÖÉ“'•]áî””ÅÇÇ+==]%%%¶9f³¹Ò.#ÁÁÁ S“&MœµLu,//Ï.äqõ÷Š´´4;wÎ6¾<ìQþ=!::Úî{E›6mÔ´iSW€š"ÔSîîîÕ†F¤ªoÿæ›oª½!üê×ܯª@Xùëêa}ûö%Ѐnrf³YÑÑÑÕ†F*»©<11Q™™™:sæŒm¼£ÐÈÕá‘àà`™L&g,hP.]º¤S§N9ü÷Xþ:--M¥¥¥¶9f³ÙöoÏQØÃb±È×××ÀUÀÙ„€Ùl–ÙlVTTT¥c.\¸Pé êTrr²òóómãËC#ŽºŒ”?[,¹¸¸8c‰@½pñâEeee9 y”?§¦¦êòåË’Ê:ÚþÝDEEiøðávÿ®ÂÃÃ忯çÀ¿A IòööVdd¤"##+ã(4Rþ¯¬¬,åääÈjµJ’<<<Ô¼yó ‘+_GDDÈÕÕÕYË®[ùõ_Y·ê®ÿèèh®Ô!j¬&¡‘ª:$$&&jÍš5UvHp¡CêZUr²³³«íC‡8¿AP«¼¼¼ª )77×á øIIIŠWZZšJKKmsÌf³]PäêðˆÅb‘¯¯¯3–ˆ›L^^^•]=233uæÌÛxGa &Ø]oÁÁÁ2™L® Nçéé©…„„(::Úá˜ââb|¸Ýy —›ÿ \‰ß €$oooEFF*22²Ò1ŽB#åÏñññÊÊÊRNNެV«$)ÊÝ]}}•ЦÃ.#!!!Šˆˆ«««³–yCªZù¶+×ïáá¡æÍ›ÛÖ}S¯¨O„@ 
Õ$4rñâEeefJù‹"æÏWN@€þÔ§²rr”˜˜¨5kÖTÙ!ãêÀ„³:dTÕ!%;;»Ú)111tHœˆ@Ô"¯œEN*mß.=û¬BgÏÖ;vcŠŠŠ”››ë0€‘””¤øøx¥¥¥©´´Ô6Çl6ÛE®X,ùúú:¬)//ÏaÈ£üuff¦Îœ9óß58{L˜0ÁîxÁÁÁ2™LusT‹@Ô«Uúè#éÙg%‹Eúî;é¶ÛõôôTHHˆBBBípLqq±-¬‘‘‘¡¬¬,¥§§ëرc:pà€¾úê+?~\%%%¶9AA Uh`¨òòò”™™©¬¬,]ºtÉ6¦¼Ixx¸Ú´i£~ýú)""B!!!jÕª•ÂÂÂäççW«§@í#7*=]úõ¯¥¯¿. „¼úªäéyC»twwWDD„"""*SZZªœœeddèØ±cú{‹¿+þÖxuœÖQ:t¨-äªV­ZÉÛÛû†êP?€±b…ôè£Rp°ôí·Òí·;íЮ®® Uhh¨$i‡v(JQúÇ?þá´ÃÅèঔ“#)Ý¿4a‚ôÃN ƒ8’©LµR+CkàBàZ­X!uî,8 mÝ*½û®äéitUÊT¦Âftœ€@ÔÔ‰Ò˜1Ò¸qÒ=÷HûöIýû]•M†2è4nF7…+¤iÓ$??iófià@£+²SªRå(‡@ÐHÐ!ª’—'MœXÖdÌiÿþz‘¤le«D%B€F‚!P™µk¥)S$77iãF)&ÆèŠ*•©LIR˜Â ®€3Ð!®VP =ú¨4l˜Ô·¯´gO½ƒHR†2ä"+ØèR8BàJë×—u))‘ââ¤#Œ®¨F2•©–j)w¹] ' CHÒ™3e]A†•z÷–¸i RY $LaF—ÀIè›6I“'K/JŸ.mtE×Î…A IDAT,S™ U¨Ñep:„h¼ÎŸ—^|Q2DêÙSJJº)à ’tX‡ÕVm.€“Ð8íØ!uë&-X }ü±´|¹htU×툎¨Ú]'! q¹p¡¬+HÿþR»vÒÒ„ FWuCr”£B7£ §Ù¹Sš4IÊÉ‘>üPš:ÕèŠjÅa–$!@#B‡ ßÅ‹e]A~þsÉb)ë Ò@ RY ÄW¾ V°Ñ¥p:„hØöî•~X:zTúàiÊÉd2ºªZuX‡ÕVmeRÃZ€ÊÑ!@ÃT\,½ù¦tûíR‹Òþýe]AXD* „´S;£ËàDB4<û÷K={J³gK¯½&­_/…‡]U!4>B4%%e]Azô||¤={¤^\î…ZeU²’ „Œ›Ñ@­HJ’&M*{ž=[zî¹)—¥,Ó9!@#Óð  a+--ë -¹¹I»w7ø® W:¬Ã’D hdèàæ•œ,=òˆ´k—ôê«ÒÌ™’««ÑU9Õa–¿ü¤ £KàDãOähX¬ViÁ©kWéÌiçβ® , "•Bè4>BÜ\Ž• ’žx¢ì±kWY0¤‘JR’nÑ-F—ÀÉ„¸9”w¹õV)7Wúî;éÜÝ®ÌPt@ÕÙè28õ_Zš+MŸ^öø÷¿¥îÝ®Êp*P†2ÔE]Œ.€“P¿-Y"ué"åäH e]A<<$I&“I¥¥¥úýï/‹Å"OOOµoß^ï¿ÿ~…ÝüóŸÿT¯^½äëë+___õêÕK_}õ•³WS«耬²Ò!h„„¨Ÿrr¤#¤_ýJš6MJL”zô¨0lÚ´iºpá‚âããuúôi}ôÑGzûí·µlÙ2Û˜;wjÒ¤Izê©§”ššª£Gê‰'žÐ„ ´k×.g®ªVí×~ùË_á 7ºNæftPÁŠÒcIf³´u«Ô¯_¥Cƒ‚‚4{ölÛûè½÷ÞÓìÙ³5nÜ8IÒ›o¾©×^{M>ø mÜøñ㕟Ÿ¯?üáúüóÏën-u耨‹ºÈ$“Ñ¥p2:„¨?Ž—F–ƓƎ•öî­2 "I“&Mª°­wïÞúÏþc{ÿÃ?høðáÆ1B‰‰‰7\¶Qè€:«³Ñe0õÊRçÎe!-[¤ùó%_ßj§Y,– ÛÌf³òòòlï?®-ZT¤œœœ*Û(VYµW{ÕU].€„0ÖÉ“eÝ@Ɠƌ‘öí“î¼³ÆÓ]\ªÿoΠ  8q¢Âö'N¨eË–×Rm½‘¬då+_ÑŠ6º À8_}%uí*ýûßÒ¦Me]Aüüjý0·Ýv›þùÏVØþå—_ê¶Ûn«õã9C¢å*WuQ£K`7£ ÐåçK/¼ -X M˜ Í›'ùû×Ùáž{î9Ý}÷ÝjÚ´©bcc%I›6mÒÿþïÿjݺuuvܺ´[»ÕIä#£K`!œkÝ:iÊ©´TúòKiøð:?dŸ>}´xñb½þúëšéÕW¥™3%WW£«j´¶i›Æj¬Ñe¨ø~*²Z¥ ¤=ÊÞïÞ]Ö„0ˆar”£ÿè? 
F—  ÀÞÑ£ÒÀÒOHO>)}óÔ±£ÑU5z_ëk¹ÉM½ÕÛèRÔB”)ï rë­R^žôÝwÒþ ¹»]$mÓ6E+Zþò7ºõ 1ÈÉ‘,éosüyjª#MŸ^öصKêÞÝ™¢[µUwêN£ËPOºË—¥”ÒӥǗ22þûÙ•]AŽ—vî,ë âáa\½¨ U©:¤C¬ÁF— ž 4tsæHÛ¶•…?.]’&L({-QÖdÚ4)1QŠŽ6ºZ8°Aä+_õQ£KPO¸]€:ô¯I¯¼RÖ%D’Š‹¥íÛË Ÿ}&…„H;vHwÜah™¨ÚmÐ@ ”§<.@=A‡ ¡:yRº÷^Éd²ß~ù²´xqÙg?ü@¤ž+Q‰¶h‹k°Ñ¥¨G„ ‘Õ*Mš$ååI¥¥Ž?ß¿_òðpzi¸6 JP „°C hˆÞ|SZ¿^*.vüyq±´k—4ožsëÂ5Û  ²È¢vjgt)ê!@C³c‡ôòËÒåËU»|Yzî9éðaçÔ…ë²Q5DCŒ.@=C hHrs¥±c%“©êq&“äî.IŸ}æœÚpͲ”¥D%j¨†] €zÆÍèÔ«Uš8±,RZZñsww©¸¸ì¹gOiÈé¿n¿Ýùµ¢F¾Ô—ò’—~¡_] €z†@½¼¼77WEEEöQ–(H˜ÍfùùùÉl6+<<\þþþjÚ´©\]] é¿:®îÒqewOOOùø8îÎPUò®#W+..ÖÙ³g+Œ+ïNRXX¨’’[g’óçÏ«¨¨HgÏžµ ·¤¥¥Ù`Bóòtêòe}+i³¤cÒ¢Erùë_Õ¼ys»G`` š7o®-Z(((HÁÁÁ¶G³fÍjô5CÝ[«µ*V±†j¨Ñ¥¨‡„ N(55Õö8vì˜rrrlÏYYYÊÏÏ·wqqQ‹-ô³ŸýÌ^èØ±£ÃPC`` Ìf³-ØQŸTUS‹-êì¸ãÏS~~¾-8qBÇ· Øxyy)88X!!!vÏááá²X,jݺµ‚ƒƒë¬~üWœâÔOý¨@£KPÀ )..VJJŠ>¬ÔÔT=zÔþ8zô¨]§Œ–-[*$$D!!!jÓ¦úõë§–-[*44ÔöÜ¢E ¹¹ñßV×Ë××W¾¾¾ ­ñœÜÜ\[g–+Ã:ÇŽSbb¢Ž;¦cÇŽ©¸¸XRYhÄb±Ø­[·–ÅbQdd¤:tè ÿºZ^£Q¬b­Ó:ý¯þ×èRÔSüf5’ŸŸ¯ääd¥¤¤())I´½¾xñ¢¤²®‘‘‘ V=tß}÷)22R‘‘‘jß¾=Azª¼ëJçΫ———§””»Ç‘#G´qãF¥§§«¤¤DRÙuЩS'EEE)22ÒöÚb±ÈÅÅÅKºémÓ6å)O#4ÂèRÔSB`§¤¤D‡Òž={´{÷níÙ³GûöíÓÉ“'%IÞÞÞêСƒ:tè aÆiæÌ™jß¾½Ú·o/???ƒ«G]2›ÍŠŽŽVttt…ÏŠ‹‹•ššªŸ~úI‡Ò¡C‡tðàA­ZµJ§N’TÖ½¤cÇŽêÞ½»ºuë¦nݺ©k×®\7|¦ÏÔC=©H£KPOhÄJJJ´{÷n%&&j÷îÝÚ½{·8  .ÈÃÃCQQQêÞ½»î¾ûnÝrË-êСƒ"""d2™Œ.õŒ»»»Úµk§víÚiøðávŸåææê§Ÿ~ÒO?ý¤¤¤$íÙ³G+W®T~~¾\\\Ô¶m[uëÖMÝ»wW÷îÝÕ«W/´ã]Ò%­Öj½¬—.@=F  9s挾ÿþ{}óÍ7JLLÔöíÛUPP Ýzë­êÞ½»zè!EGG«Gòòò2ºd4Í›7Wß¾}Õ·o_»íYYYJLLTbb¢<¨¿ýíoz饗dµZ©¾}ûêç?ÿ¹úöí«N:5š ÒZ­U¾ò5NãŒ.@=F  ;}ú´âããµiÓ&íØ±C?ýô“¬V«:vì¨Þ½{ë­·ÞRŸ>}Ô±cÇFs³=ê…„„Øu9~ü¸´cÇ%$$hùòå***RPPúôé£AƒiðàÁj×®•×­Ïô™ú«¿Bjt)ê1! 
Hii©víÚ¥õë×kÆ Úµk—$©gÏž9r¤Þ|óMõîÝ[W 8¤Q£FiÔ¨Q’¤¢¢"%&&*!!AÛ·o×oû[=ù䓊ŒŒÔàÁƒ5xð` 4HþþþW^;Î霾ÒWš«¹F— ž#p“+**Ò† ´lÙ2­_¿^§OŸVxx¸¬gŸ}V111jÚ´©Ñe×ÅÓÓS}úôQŸ>}ôì³Ïª¤¤D;wîÔ† ´aÃÍŸ?_®®®êÛ·¯ÆŽ«±cÇ*((È貯ÛúB—tI÷è£KPϹ]®]II‰Ö¯_¯GyD-[¶ÔèÑ£•™™©ßþö·:xð ÒÒÒ´`Á;–0777ýüç?×k¯½¦ï¿ÿ^Ç×'Ÿ|¢½øâ‹ Ull¬.\¨Ó§O]î5ûLŸi°«¹š] €zŽ@ÀM$99YO?ý´‚ƒƒ5tèPýôÓOš5k–ÒÓÓµmÛ6=ýôÓºå–[Œ.pšÀÀ@=ðÀúÇ?þ¡'NhéÒ¥ ÐSO=¥–-[jäȑڴi“¬V«Ñ¥V븎k“6é=`t)nBê9«Õªøøx1BíÛ·×êÕ«õÌ3Ï(%%E úŸÿù…††]&`8ooo;V+W®ÔñãÇõ׿þU………ºë®»Ô¹sgÍŸ?_çÎ3ºÌJ}¢Oä#Ò(£Kp POY­V-_¾\]ºtQll¬ µbÅ 9rD¿ùÍod±XŒ.¨·üýýõÐCiË–-Ú»w¯z÷î­§Ÿ~ZaaaúÝï~§ÂÂB£K¬àc}¬ š ù] €›€zh×®]êׯŸxàÝvÛmÚ³g¶nݪ1cÆÈÕÕÕèòœ"66Öèê=ÎQÍÜzë­Z¸p¡ÒÓÓ5sæLÍ›7OíÛ·×¢E‹tùòe£Ë“$ýKÿÒúQ¿Ò¯Œ.ÀM‚@@=râÄ M˜0A={ö”«««¾ÿþ{-Y²D]»v­³cšL&»‡n¹åM™2EÿùÏêì¸Õ¹ÞõM&S-WR;ÊÏomªí0ÃÕׂ···Zµj¥áÇëã?Ö¥K—*›œœ¬©S§Êb±ÈÓÓSM›6UïÞ½õòË/kß¾}¶qV«UK–,Q¯^½Ô¼ysy{{«k×®z饗tàÀZ]ÏÕõÒK/éðáú÷Þ{õøã+::Z;vì¨ÓãÖÄ"-R´¢Õ]Ý.ÀM‚@@=±eËuëÖMÛ·o׊+´mÛ6EGG×ùq­V«íùòåË:~ü¸>ýôSµjÕJ½zõÒÆë¼G6oÞlÈqëJùy®Mµ}Ž®¼¬V«NŸ>­íÛ·küøñúôÓO­Ã‡W˜·aÃMš4IcƌѾ}ûTPP Ý»wëÁÔ¼yóìM¯¾úª–-[¦ èØ±cÊÎÎÖ;ï¼£uëÖ©K—.µºžÊ4oÞ\ùË_´oß>iÀ€š={¶aÝB T •Z©_ë׆ÀÍÉd­‹ßDÀ X°`¦NjtàTK–,ÑäÉ“5jÔ(}ôÑG pêñM&“ÃÀš5kôÒK/Ùux¨ï*[K}PŸk+WU ,Ðo¼¡={öØ®ÑÌÌL5J[·n•¿¿…9ñññеí3((Hû÷ïW‹-ìÆ>>ÊÏÏ—‡‡Gµû–Ê:„¼üòËZ²d‰zöì©»ï¾[#GŽThhhæ×µ7ÞxC¿ÿýïuøða…„„ÔùñÆj¬Ò”¦]ÚUé˜ââbµnÝZ6lPTT”m{nn®Ú¶m«C‡©E‹š8q¢ºvíªgŸ}Ö6&..N ,°uñ÷÷×±cÇÔ¤I“º[WHLL”$EGG\ ܼL&“–-[¦ûî»Ïn;B räÈmÞ¼Y¯½öš¡ar&“I...jÑ¢…î¿ÿ~edd(!!A±±±’¤øøx7®Â¼aÆiÓ¦M¶ßqÇÓ»wo1BëׯWII‰$©uëÖÕ†8®w^Môïßßî½ÅbQvv¶Ý¶øøx5ªÂÜÑ£G+>>Þö~Ó¦MÇ 2¤Â¶k=§5U“õ\¯ª:“Ô„Þ~ûm¥¤¤hâĉڹs§ºu릇zHµRãxæ™gäíím øÔ¥4¥)Nqš©ª»·¸»»kÚ´iº„,\¸PÇW‹-$Iß~û­FŒa7¦ÿþÚ»w¯í}§N4sæÌ…ƒÔoÆßiÐH%$$ÈÓÓS111F—"I¶!çÏŸ×O?ý¤… ª}ûö¶Ïssse±Xd2™ì-[¶Tjjj…ýùøø8<Ί+tÇwhúôéjÖ¬™ú÷ï¯wß}WÅÅÅUÖw½ójÂl6Û½÷ôô¬r8uê”Z¶lYanpp°N:Uí8GÛ®õœÖTMÖs½<¨6mÚØÞ‡‡‡_W¸ @÷ß¿–,Y¢ÔÔT]¼xQO=õT­Ôx#<==5xð`}ûí·u~¬wõ®‚¤1SíØ©S§jÕªU¶k­´´T|ðžxâ Û˜ŒŒ µoßÞîZjÖ¬™]héÒ¥:yò¤Ú¶m«[n¹E<òˆâââjíúà<B ’ŸŸ¯¦M›ÊÕÕÕèRj$00P§OŸ¶G®|œ?¾Æû Ðܹs•œœ¬””MŸ>]K—.ÕüðCë½÷ÞÓ“O>i÷ùÀW£}yzzªk×®š:uªÖ­[§eË–]SýŒG À ¿øÅ/äåå¥%K–]J¼òÊ+š5k–V®\©ÜÜ\jÍš5zà4kÖ¬kÚ×äÉ“•””¤¢¢"?~\þóŸíB72/44T *..ÖÆvMµUfÖ¬Yš;w®.\¨'NèäÉ“Z´h‘æÎk·þW^yEo½õ–/^¬“'OêôéÓZ¾|¹^ýõ 
û¬ÍsZ.^¼¨ÔÔT-]ºT111š7ož6mÚ¤€€Û˜ððp½þú늉‰Q\\œòóóUTT¤#GŽhΜ9zê©§ôî»ïÚíw„ JHHPaa¡ÎŸ?¯ÄÄDMœ8Q>ú¨³—XAZZš¶lÙ¢áÇ×Ù1.ë²ÞÓ{zD¨¹š×x^çÎÕ±cGÍš5K?þø£î»ï>»ÏgÍš¥Ù³gë“O>Qnn®Î;§Í›7kذa¶1ýû÷×ßÿþweffª¤¤D999š;w®ú÷ï_këàB âïï¯iÓ¦iöìÙÊÊÊ2¬ŽòŽÕuÖ°X,úüóϵtéRµiÓFÁÁÁš={¶Þ{ï=9Òáþí3>>^ÞÞÞ0`€š4i¢Þ½{ëâÅ‹úä“Oª¬©&ó$iîܹz衇äçç§éÓ§kÞ¼y×¼vGÛÛµk§uëÖéóÏ?Wdd¤Z·n­•+WjݺujÛ¶­m\dd¤Ö­[§•+WªuëÖŠˆˆÐ¢E‹ôé§ŸVØgMÏiMk¿–õT·O“ɤ¦M›ªoß¾úûßÿ®‡zH‰‰‰j×®]…ywß}·–,Y¢åË—«C‡ Pll¬ÒÒÒ´mÛ6uéÒÅ66!!A·Ür‹&Nœ¨ÀÀ@jÊ”)5j”~÷»ßUY_]³Z­š>}ºÚ¶m«Ñ£G×ÙqVj¥Žê¨žÔ“վʌ3ôÆohÊ”)òðð°û,**Jk×®Õ²eËÔºukýìg?Óœ9sì:ºÌž=[«W¯V·nÝäïï¯~ýú©´´TŸ}öÙ ¯ €s™¬V«Õè"àJ ,ÐÔ©S.œâìÙ³êÙ³§¼¼¼´e˻Πœë…^Ð;ï¼£-[¶¨oß¾ur «¬ê¦nê¤NúL׸pႚ5k¦#GŽ(44´* v%&&J’¢££ ®n^&“IË–-Ó}÷Ýg·!òóóÓš5ktüøq 0ÀÐN!@cURR¢'žxBo½õ–-ZTgaIZ¥UÚ¯ýzI/]óÜüü|½ù曚8q"aBŒÖºukíØ±C—.]R÷îݵvíZ£Kôôt 8P‹/ÖòåË5~üø:;–UV½¦×4VcÕE]®i®ÉdRhh¨öíÛ§?ýéOuT!€› €z ""Bß}÷F¥ÿÏÞÇGUÝÿOöu ’@6 ¨ì -*hN‚€`Q·«¥|±ZÔjAm«ð+ ]l¡Öº´ßŸÖ­E…±‚ÒJÄ…Å@2™¬ „ÈžÌï;óc’™,rCòzòÈCï3÷~Ι;É<’ó¾gÚ´iš>}ºrrrŒ. 趪ªª´bÅ 6L¥¥¥úøã5{öì zÎèúB_èQ=Úîç:9sFo¾ù¦ÂÃÃ/@u.6Bºˆ°°0­[·N[·nU^^ž.»ì2-^¼XåååF—t+o¿ý¶†ª'Ÿ|R÷ß¿öîÝ«‘#G^ðóþJ¿Ò,ÍÒ¸àçÐýèb,‹233µjÕ*ýíoÓÀõÈ#¨°°ÐèÒ€‹Vuuµžþy5J×_½&Mš¤ììl-_¾\AAAüü´A™ÊÔ#z䂟 @Ï@   ò÷÷×¢E‹tøða-^¼X/¼ð‚’’’4wî\íܹÓèò€‹Faa¡yä 0@÷Þ{¯FŽ©½{÷ê¹çžS¿~ý:¥†F5j¹–k†f(U©rNÝ€.Ìl6ë±Ç“Ýn׋/¾¨¼¼<?^#GŽÔÓO?­ÜÜ\£KºœŠŠ ýõ¯ÕôéÓ•””¤^xA‹-R^^ž^|ñE1¢SëyQ/ê }¡_èz^Ý€‹@@@€n¾ùfíÞ½[»wïÖUW]¥U«V)99YW\q…Ö¬Y£ÂÂB£Ë SUU¥×^{M7Üpƒ¢££u×]wI’^~ùeÙívýüç?Wtttç×¥*-Ó2e(CÃ4¬ÓÏ û"p‘;v¬þð‡?èÈ‘#z÷Ýwuùå—ë‰'žPBB‚®¸â -[¶L»víRCCƒÑ¥TNNŽž}öYÍœ9S}ûöÕ¼yóTQQ¡ßþö·*))ÑÛo¿­›nºI†Õ¸B+tR'õsýܰtO~F€sãçç§É“'kòäÉúÃþ -[¶èwÞÑK/½¤'žxB‘‘‘²X,š]Ó§O—$Ùl6Y­VY­Výä'?Ñ©S§£ñãÇkܸqJMMÕØ±cåïïopå€wÅÅÅÊÌÌÔÎ;õÑGé“O>Qmm­.¿ürÝpà ²X,ºæšk ]¤%?ÓÏÔ[½õcýØèRtCBº¡äädedd(##C555Úµk—>úè#íÚµKË–-Syy¹ÂÂÂtÅW誫®Òرc5jÔ(ÅÄÄ]:z¨Ó§OëË/¿Ôž={´k×.íܹSòóóÓèÑ£uÕUWiñâÅš0a‚úöíkt¹­úLŸée½¬¿éo V°Ñå膄tsš8q¢&Nœ(IjllÔþýûµsçNíÚµKýë_õøãK’¢££5jÔ(5J£GÖ¨Q£4xð`ùøøÙt3%%%úì³ÏôÙgŸiïÞ½úüóÏuøða566Êl6ëÊ+¯ÔÂ… 5nÜ8}ç;ßQ¯^½Œ.¹]rh‘é;úŽæj®Ñå覄ô0>>>>|¸†®{î¹G’tâÄ ×ýÏ>ûL›6mÒªU«T__¯ 6L—]v™† ¢!C†è²Ë.ÓÀåïïopoЕ(++KYYY:xð 
²²²´oß>•””H’âãã5räHÍ™3ÇDJNN6¸êó·Në´û¿ÿL2]€nŠ@Ô»woMš4I“&Mrí«©©Ñ¾}û´wï^íÛ·O_}õ•¶mÛ¦¼¼<9ùùù)99Ù>>ŠUbb¢’’’\A‘ÄÄDÅÆÆªÿþ 5¢[h‡ÚÚZ•””¨°°Pùùù®à‡ó+//OÕÕÕ’$%$$hÈ!š0a‚.\è DEEÜ“ÎsŸîS¤"µLËŒ.@7G íÒ«W/=Z£GnöXii©[` 77Wv»]™™™ÊÍÍUUU•«mHHˆú÷ï¯èèh 0@ÑÑÑ®íþýû+**J}úôQŸ>}äçǯ±:Ryy¹Ž;¦ÒÒR=zT………:zô¨ ܶ¿þúk×süüüÔ¿W°çÊ+¯t ûÄÅÅÉ×××À^o£6ê-½¥-Ú¢…]€nŽ¿¤ ÃDEE)**JcÆŒñøøÑ£G]+N”””¨¨¨ÈõßÇ«¨¨HGU}}½Ûó"##Õ·o_W@¤éWDD„BCC¦°°0™Íf………)44TÁÁÁÑõNÕÐРS§NéäÉ“ª¨¨ÐéÓ§UQQáÚwêÔ)•––êØ±c:~üx³¯¦ã¥~ýú)..NýúõÓ·¾õ-W0ǹ?66–`N Né”~¨j(MiF— à/¸è4ÑÑÑŠŽŽÖÈ‘#½¶illÔ×_­ÒÒRW€¡´´Ômûøñã:|ø°ëÿËËË›…œüüüÜB"Îm???…„„( @½zõR`` ‚‚‚¬€€…„üÿ‚ƒƒäõØMÕÔÔ¨²²²Ù~gé®®N§OŸV}}½***ÔØØ¨òòrIRYY™¤oVóhllÔÉ“']ÁOÇvŠŒŒTxx¸úôéã ÑŒ9²Yˆ¦oß¾ŠŠŠRtt´½m³TKU«Z­Ò*£KÐC@—âãã£~ýú©_¿~íz^UU•***TQQáZ%ù]QQ¡òòrW¸ÂùßŠŠ Õ××ë믿V]]*++USS£êêjUUU¹Žíl×TÓvN¾¾¾ ÷XgDD„|||ÜÚùøø(""B’d6›%I‰‰‰òññq…WÂÃÃ]+ „……)""Bnû¼Ö»zWë´Nÿ«ÿUõ1º=t ÁÁÁ Ö%—\bÈùM&“^}õUÍ™3ÇóÃ_ëkÝ¡;t£nÔMºÉèrô >F£F5j¾æ+D!ú“þdt9zV€s°R+µMÛ´C;®p£ËÐðB´Ó'úDË´LOëiÕX£ËЀv(W¹æj®&j¢–h‰Ñåè¡„@;,ÔBU©J/é%™d2º=”ŸÑÀÅb­Öêu½®wõ®.Ñ%F— c…h«¬z@è)=%‹,F— ‡#­ÈU®æižæj®ÔƒF—B %ªÐ ÍÐ Ðz­7º$ù]tUjÔ|ÍW©Jõ‰>Q/õ2º$D ¼zLi³6ë}½¯þêot9àB к]·k‰–èÝat9Ð 8Ë—úR³5[³4KÿGÿÇèrÀ#!ð_¹ÊÕdMÖhÖ zA>ü @Å_3@R©Ju­®U_õÕ[zK 4º$ðÊÏèÀh•ªÔtMW½êµMÛ¡£K€УթN³5[9ÊÑGúHýÔÏè’ UBôXõª×ͺYëcmÓ6¥(Åè’ M„è‘Ô ùš¯ÍÚ¬MÚ¤ÑmtIÐfBô8 jÐíº]oÿ÷ß5ºÆè’ ]„èQÕ¨;t‡ÞÐz[ok’&]´=F£u§îÔßõw½¡7ô=}Ïè’àœÐ#8äнºWÿWÿWoè MÓ4£K€sF @·× ý@?Ðÿêõ–ÞÒTM5º$8/BtkµªÕ-ºEï轡7ƒè„è¶Î茾¯ïë#}¤ Ú ‹,F—‚@€né¤Njš¦é+}%«¬«±F—†@€n§D%š¢)úZ_k»¶k˜†]t(!º»ìJWº|ä£ÝÚ­xÅ]t8£ €Žò¹>×8S˜Â´C;ƒè¶„èÞÕ»ºZWëR]ªôúª¯Ñ%ÀC ÀEïÏú³fh†fk¶6k³ÂntIpAù]p±ÉÍÍÕž={šíß½{·L&“k»_¿~ºúê«;³´Ç!‡×ãzBOèçú¹–i™L2µþD¸ÈÚiÅŠZ·n]³ýk֬њ5k\Û‘‘‘*++ëÌÒz”ÕèÝ¡7ô†^Ô‹ºU·]t£ .6Ó§Ooµ¿¿¿fÌ˜Ñ ÕôLÇu\iJÓ»zWïé= zVÚ)==]*//÷Ú¦®®N·ÜrK'VÕsìÓ>ÍÔLIÒ.íÒ¥ºÔàŠ ó±BÐNþþþºé¦›àµMdd¤&MšÔ‰Uõ oêM]©+Õ_ýµ[» ƒè±„ç`Þ¼yª­­õøX@@€n½õVùù±@oGqÈ¡Z¡u£nÖÍÚª­ê«¾F—†á/ÒÀ9¸æšk£#GŽ4{¬¶¶VóæÍ3 ªî©BZ Ú¤MZ§uº[w]ŽB€s`2™tË-·(  Ùc±±±ºâŠ+ ¨ªûÉV¶®ÐúHé=½Gþ‹@pŽæÍ›§ÚÚZ·}Z°`L&“AUuïè}[ßV˜Â´W{õ]}×è’ Ë œ£o}ë[4hÛ¾ÚÚZÍ›7Ï Šº‡5ègú™¦kºfi–¶k»âgtYÐ¥ÎÃüùóåïïïÚ4h†n`E]ߟõgÑÑ™fÓ1]«kµZ«µFkô¼žW  ¨º6!Ày˜?¾êëë%IþþþºãŽ; ®¨k; ºO÷éS}ªÅZìöØvm×H”]víÖîfþ?!Ày8p 
FŒ!Iª¯¯×M7ÝdpE]Wj4WsÕ¨F9äПõg½®×åCkµViJÓXÕôÔH£Ë€.@pžn»í6IRjjª’““ ®¦ëzDè ªNu’$ùè.Ý%‹,z@èI=©7õ¦"ip¥Ðõù]p1¨®®V~~¾Û—ÝnW~~¾rss%I‡Ò•W^©øøx×Wbb¢  øøxõîÝÛà^g«¶jÖÈ!‡k_£U©J}¢O´]Ûu•®2°B¸¸$•––º‚yyyÊËËS~~¾ ”ŸŸ¯’’WÛÐÐP%$$(!!A)))²X,ª©©QDD„ •ŸŸ¯íÛ·+??_Gõø¼³C#ÎíØØXùùu¿_Ù•ªT·èùÈG jp{¬^õ:£3úTŸ€vè~]š¨««Ó±cÇtäÈÙl6Ùl6»¶>¬S§N¹Ú›Íf%''+&&F£FÒìÙ³•œœìÚ#“ÉÔ¦s×ÖÖª°°Ðí|6›MÙÙÙúðÕ——§3gÎ4;÷Ùç‹Urr² ¤ˆˆˆŸ íNÝ©“:Ù, âÔ¨Fݯûõ]}WÃ5¼“«€‹\ôª««U\\ì1ìa³Ù”ŸŸ¯úúzIR@@€ú÷ïï ZX,edd¸Â í°Ú\áoÊÊÊ<ÖmµZU\\¬’’9IRPP+ â)4’ __ß«ÿ|ýQÔFm”CŽÛ9äÐ<ÍÓ§úT è¤êàâE ]ž30á)4áÜv2›Ín ‹Å⶘˜({ÓœÙlVjjªRSS=>ÞRàÅjµ6 ¼ôéÓÇkhdÈ!xiÉAÔÿèZ ƒøÉOjÔWÿý7B#:¥>¸˜€¡jjjTTTä5ìa·ÛUYYéjo6›]!‡¦aAƒ)""ÂÀÞ\AAAm^e¤ihÄjµ*;;[ååå®¶Î1<{ìÎÞNJJ’Éd:¯škT£¹š«F5z|ÜO~ªW½Â¦ôÿþ›¦iŠSÜyz !¸ ÊÊÊš=Î,Øív56~ r (Œ7Îm;>>^~~üJË“ÖV©ªªòø8WÉËËSCCƒ$)00PqqqC#ÉÉÉ0`€üýý[¬ç=¢: }sL_ùÊ!‡Õ¨Á¬Ùš-‹,ú®¾+µ|,@süõ笮®NÇŽóö8tè***\íÏ^Ý#55Õãê¸0‚ƒƒ[\e¤¶¶V………Wi9pà€²²²túôiWû³_˦¡‘‚!ZºF9$IŠÐµºVÓ4M“5Y}Õ·Sú ÝxÕtU‰¦aO«J8ËE®À@RR’zõêepàM@@@‹é›Õ^<]™™™Ú¸q£rssåp8¤ÿ‘|æû(rg¤¤Qu£Û/V•±•ú<ùs%''+!!A¾¾¾ØCè^„ô`Î þž&ùçääèäÉ“®¶f³Ùm%‹Åâ¶:Dbb¢||| ì .4³Ù¬ÔÔT¥¦¦z|¼¦¦FEEEß\Oû‹u¤êˆlµß\Oïo}_ª««“$ùûû+**Ême‘³¯§””………uf÷à¢B  ›r›œß$ìa³ÙZœœï\݃ÉùhÀÀÀ6¯2Òôº´Z­­†š†F!èÉ„\¤œ뛆=œÛ¹¹¹r8’¤   ·IõãÆsÛNHH¯¯¯Á=BOÐÚ*#UUUÍÂKÅÅÅ®ÐH^^ž$}@‰‹‹óIJJR¯^½:³{Ði„tAµµµ*,,ôöÈÊÊÒéÓ§]íÍf³k"|jjªÛäø*22ÒÀÞmÜâ*#uuu:vì˜ÇЈÕjÕ¡C‡TQQájö{£ipĹ #!ð¶ ‚sŸ§Uœ؇ªŒŒ פöÈßßßàÃßß_±±±ŠõºÊHYYY³0•ÍfSff¦6nÜ(»Ý®ÆÆFIÍWÏi‰—Ÿ¿FÐõð—L€  ¬¬ÌkØ#;;[ååå®¶Î bbb”œœ,‹Åâ61=))I&“ÉÀÞ³Ù,³Ù¬¡C‡z|¼¦¦FEEEÍÞ›6›M}ô‘ìv»*++ÝŽç ˆ4 ÞRÐÎjµ6 ÚõéÓÇkh$%%EaaaÙ=- :DMMŠŠŠ¼†=ìv»*++%IþþþŠŠŠòö>^~~üZº‚ÖV©ªªòø½ß¹ÊH^^ž$IŠ‹‹óIJJR¯^½:³{@·ÅÌ  ºº:;vÌkØãСCª¨¨pµwÞ!>99Y©©©ï肃ƒ[\e¤¶¶V………W‡:pà€²²²túôiWû³†4 $''Ël6wV×€‹zowwwîótwwç$]‹Å¢ŒŒ îîð(  ÅÀˆôÍ*SMöØl6effjãÆÊÍÍ•ÃáÔ|•©¦¡‘„„ùúúvV÷€.‹@Ý€s¢­§É¶999:yò¤«­Ùlv›\k±XÜ&Ü&&&ÊÇÇÇÀÞº³Ù¬ÔÔT¥¦¦z|¼¦¦FEEEŽY­V¨®®N’äïﯨ¨¨f+‹8Ž û¬>¬ÊÊJ¥¤¤hÚ´iºùæ›5lØ0·öYYYzê©§´uëV;vL}ûöÕĉõðÃkèСnm;úzé®:ã}p¡ÞSz<\ÁÁÁ-®2RWW§cÇŽy _Z­V:tH®ögÿLnqnF"Àyhz7ò¦w$·Ûí®‰Äλ‘;'’:T®É¥ ¿¿¿Á=‚ôÍdòÎ38Ïs¡Ý‡Ã¡Ó§O+;;[6lÐW\¡W^yEééé¼#=þøãúä“O´~ýz¥¤¤¨ººZ{÷îÕý÷߯§žzÊíµÞ¼y³|ðA-]ºTO?ý´úôé£ÒÒRY­VÝpà 
zúé§uýõ×»ÚwÖõÒ]uäûàB½§Þÿý?&:Ÿ¿¿¿bccëu•‘²²2+veffjãÆn?×›®ÚÕ44/??~€ Çäà/Õº˜õë×+##Ãè2IßL õöÈÎÎVyy¹«­óNâÞî"ž””Ô)“ÿÑ1:r‚KÇê¬ ·ólܸQ<òˆ¾øâ‹N;§¢££õå—_ê’K.qÛàÀ :ÔUgAAÒÓÓõᇪo߾͎sôèQ]}õÕÚ²e‹]û»R_»ªÎzðZàBª©©QQQQ³ÏÎm»Ý®ÊÊJWû³WiúaРAŠˆˆ0°7Ð9233%ÉkÐ:“ɤW_}UsæÌqÛïcP=®ººZ6›MV«Uëׯ×òå˵páB¥¥¥iàÀò÷÷WïÞ½5fÌÍŸ?_k×®ÕÎ;%I‹E+W®Ô† ´gÏUTTèĉÚ³gÞ~ûm­[·NK—.Õ7Þ¨ñãÇ+99™0ÈE®¼¼\K–,Qrr²‚‚‚£Ûo¿]ÿùÏZ|žóu7™L®¯»ï¾Û­MAAfΜ©°°0EGGkþüù:~üx³cíÛ·OÓ¦MSXX˜ÂÂÂ4eÊíÛ·ï¼úe±X”••åvŽ©S§ºÎ1uêÔfçhËX´Öïýû÷kêÔ© Uxx¸&Ož¬¸Úž}“ɤœœÍž=[f³¹Y›¶ŒKee¥Ç‰×—_~¹[x`ÕªUúÑ~ä1 "},Y´h‘Ö¬YÓ⸶×矮ôôt…„„(<<\×^{­Þ{ï=×ãMûÜÒþÖÆ¬£ÆÔùœ¶\¿my8Ùív·vÙÙÙ:}ú´Û>»ÝÞâxæççkÖ¬YŠˆˆPhh¨¦M›¦ƒ6kמ~žkߥ¶_ïèú•œœ,‹Å¢Ûn»MK—.Õºuë´uëVíß¿_gΜq}øûßÿ®eË–Éb±H’¬V«V¬X¡9sæh̘1ŠŒŒTïÞ½5tèP¥¥¥iáÂ…Z±b…^zé%Y­VÙl6×j$€'¬ Ëa…tçêMïà}ö>§   ·U=šÞÅ;!!A¾¾¾ö­é*×_½F­{î¹G‘‘‘:pà€-Z¤;w¶ºAk+#¤§§kÉ’%?~¼ÊÊÊô“ŸüD!!!úË_þâj—­«¯¾ZË–-Ó 7Ü mÞ¼YË–-ÓöíÛÕ¿ÿsªáwÞÑÃ?¬/¾øBÙÙÙš8q¢üq]wÝu’¤ 6hùòåÚ¶m› Ô®±ðvΜœMœ8QË—/×u×]'___mݺU¿üå/µoß¾fÏ1™LJKKÓòåË5zôhmÛ¶MS§N•Ãáhó¸Ü|óÍŠŽŽÖÏþs™Íf¯ãt饗jãÆ®¾zrèÐ!Íœ9Ó-`p>«R:tH3fÌÐï~÷;]uÕU²Ûíºçž{´cÇŽ6§§ý-Yk·çZótý.^¼Xn×okcÔô±/¿üRwÝu—[ÈÈáp(%%E¯¿þºFŽéuЄ š«­ãRYY©Ÿýìgz饗4vìX]wÝuš9s¦âââܞ׫W/•••)00ÐÛPªººZ½{÷Veee«}m‹›o¾Y×^{­n½õV×¾¬¬,]zé¥çñ6f­=ÞžkÍÓõ›••¥I“&¹]¿-Õïí±o}ë[úË_þâ lÞ¼Y¿þõ¯õþûï{<ÆÙÇúË_þ¢Ûo¿ÝmÿêÕ«õÅ_è…^8§~zã¶ô½½×;PWW§cÇŽ¹}¾9ûóÎáÇuêÔ)W{çgœ¦¡羘˜V¢`(!pþ„¸hôÍê-­ìa·ÛÕØØ(É}uO“!ãããåççgpp±i:<==]•••zôÑGe±XÚuMµ6þĉn«V444ÈßßßuKR¿~ýôïÿ[ nÏ?zô¨&Nœ¨´ZƒSpp°âãã5~üxýô§?UJJJ‹çÈÍÍÕUW]åZU§­cá­ßÞÎsüøqEEEyœxæÌõêÕ«ÍÇò6.åååÚ¼y³6mڤ͛7+==]Ï>û¬+¬“'O¶1›Íªªªjµ¯mѯ_?}úé§Šm±]{!ÞÆ¬µÇÛ3¦ž®ßšš»]¿-Õïí±µk×*++KÏ>û¬$iÚ´iº÷Þ{5mÚ4Ç8ûX%%%ŠŽŽvÛŸ››«qãÆ©¸¸øœúéiŒÛÒ÷ö^ï@[xúœtö6Ÿ“t%BàüpÑ Òýqçk\ šN///×ã?®þóŸ:vì˜F¥n¸A÷Þ{¯üýýÛu¬¶<Öt¿¿¿¿êëë=#88ØmµŠöÖàäçç§3gÎ4 CTWW+,,Luuu’Ú>ÞÎéí<ÞžÓRíç3.gΜÑm·Ý¦ÐÐP½øâ‹’¤!C†èwÞÑ Aƒ¼>ïðáÚ>}º¾úê«6ÕØ???UVV*  Åví „´TOGiGÕäé±cÇŽé²Ë.S~~¾JJJtÝu×iÿþý­~¿÷vž¦×ñùö³­}oïõt„ÚÚZvÈJjTdd¤½p±#çÏ[ „Û~:\UU•×°‡ÍfS^^ž$Iêß¿¿kâ¡ÅbQFF†k2bbb¢BBB î EDDhõêÕZ½zµJKKõþûïë™gžÑŽ;ôúë¯_ðóGEEéÀn«\ˆs9rD‰‰‰nû9¢¨¨(×öùŽETT”JJJš­˜PRRrN5Ÿë¸„„„hÍš5>|¸kßäÉ“õÞ{ïµy÷Ýwe±XÚ}>oúôé£ÒÒÒ6­RSSã,8qâD‡ÕáÔ×Z[ôíÛWãÆÓ+¯¼¢ƒêÇ?þq›Ã'NœPïÞ½Ýö9rD}ûöumwV?;òzÚ* Àî𦬬Ìãç4«Õªââb•””¸Kg¯2â)4’ 
__ßÎêþ‹@ Ýœ=M"´Ùl*++sµ5›Ín-‹Ûvbb¢||| ì Ð6&“Iêß¿¿¢¢¢4wî\¥¥¥5 Ox{îùšX°`~ùË_ª¢¢BŸ}öY›ŸgµZ›Ýç­·ÞRzzºk»3ÞSRÇ^ï@G2›ÍJMMõzÇþêêj{ äçç»VÙ PŸ>}¼†FRRRÖ™Ýè„ÜÔÔÔ¨¨¨Èãä¿ââbÙívUVVJ’üýýå5ì1xð`…‡‡Ü# ãÜ}÷ÝZµj• ¤“'OjíÚµmZ)"..Nü±ÆŒ£>ø@wÝu— ÚuîåË—kÆŒjhhÐĉ íÛ·ëž{îÑïÿûsí’›eË–iâĉ ׌3d2™´aí^½ZÛ¶mskÛ–±ðÖïåË—k„ ÓôéÓåãã£-[¶èOúS»knϸÜzë­Z¹r¥† &___ú¨fÍš¥ØØX]yå•ÊÉÉÑwÞ©E‹¹µKKKÓ#<¢5kÖ(22R;wîÔÿøÇ«ÃéB]kçò>¸îºëôƒü@÷ÜszõêÕæsýêW¿’$}ï{ßScc£6lØ µk×jûöí®6ñžrž§£®w 3µy•‘¦Aa«Õªììl•——»ÚšÍæf+‹œ½””Ô!J€žÄähéö„`€õë×+##Ãè2º­²²2«z8÷åææºîdäqžs;>>^~~ÜkÝÏÙRï‡÷ß_¿ûÝï´cÇUTT(..N×_½üñVïzþÚk¯iéÒ¥***R||¼V­Zå \4=·óKÒáÇõðÃËjµª¾¾^—_~¹~øáf+´¥?Þ|ùå—úéOª;vH’®¾új­\¹RÇwµiëXxë·$íß¿_>ø >üðCùøøè»ßý®žy楤¤¨¡¡ÁcíÞêo˸ìÞ½[Ï?ÿ¼>øàåççË××W—^z©î¼óNÝwß}ÍΓ••¥•+WÊjµêÈ‘#ª««Ó÷¿ÿ}=ôÐCÍî¦ß– Ì­ûǬ|P™™™êÛ·¯î½÷^=ôÐCnmJKKµxñbmݺU•••š4i’~ÿûß+>>Þí­YGi{¯ßö¾$©¾¾^ƒÖ®]»ӬΦœÇÚ¿¿–,Y¢]»vÉápèšk®ÑªU«tÙe—uH?ÛÛ÷¶^ï@wSUUåõ3§ÍfS^^žë=¨¸¸8¯¡‘¤¤¤vÃt™™™’äuE"@ëL&“^}õUÍ™3Ç}?] sW[[«ÒÒR¯ï:¤ŠŠ W{çš[º[3t–ýû÷kÚ´i²ÛíF—âæäÉ“2dˆ~ó›ßhîܹF—Ó£üýïׯõÒK/]J‡ëª×;ЙjkkUXXèqUº#GŽ(++K§OŸvµoé³krr²Ìf³½à 8Þ!ÜÆ."çr—eçd9‹Å¢ŒŒ î²  K0™Lzæ™gtÛm·©W¯^:xð ~ô£é¾ûî3º´f"##õ׿þUóçÏWCCƒf̘¡ÐÐP£ËêÖL&“þýïkÅŠzî¹çŒ.ç¼]L×;Й\aoÊÊÊš}æµÙlÊÌÌÔÆÛµº]BB‚|}};«{èBœÞ>>ö¼Û¸q£Ö¬Y£G}T>>>|¸fΜéö9<11‘•ü@—artÕYz¬õë×+##Ãè2 q®“Õ<…F˜¬†î¨+B:Û¬Y³¯µk×¶ù9F‡:Û¹ŒQGa¬»® 5n]éõh “ɤ#Fè¹çžÓ°aÃtøðaÝwß}JOO×c=fty@›ÕÔÔ¨¨¨Èãçg›Í¦‚‚ÕÕÕIòº>ûóôàÁƒnp€î'33S’”ššjp%pñ2™LzõÕW5gηý¬]ˆÙlVjjª×‰2UUUÍ&¹9W±Z­ÊËËSCCƒ$)00Pqqq^C#IIIêÕ«Wgv@©­­Õû￯×^{ÍèRº,ƨó0Ö¯—_~Y#FŒ$1B/¼ð‚ÆG ]Š30Ý4ìáÜÎÍÍu…±‚‚‚Ü>÷Ž7Îm;!!A¾¾¾÷ ãp«X¸ˆ+99Y‹Ezúé§õÒK/iëÖ­ÊÉÉQee¥ŠŠŠ´gϽüòËÊÈÈÐСCUUU%«Õªx@iii6l˜BBBÔ»wo3FsæÌÑâÅ‹µbÅ ½öÚkÊÌÌTqq±ÑÝER^^®%K–(99YAAAЉ‰Ñí·ß®ÿüç?®6&“Éõ_ç×Ýwßíz|ß¾}š:uªÂ¦©S§jß¾}nçq>ïÀš2eŠÂÃêiÓ¦éàÁƒçÝÖÓ¾‚‚Íœ9SaaaŠŽŽÖüùóuüøñfc°ÿ~M:U¡¡¡ ×äÉ“uàÀÇÞ±c‡4a„¿ 6hܸq Rbb¢–,Y¢ŠŠ }ÊÉÉÑìÙ³e6›ÝêkéFQkµKß\CÓ¦Ms]CS¦Liv 1Ö7ÖçÛoÏoϸIm{½ýð‡?Ô< ââbmÚ´©Mç0zŒZ«=;;[iiiš>}ºl6›ìv»æÏŸ¯™3gª°°ÐÕŽ±î¼±>ß~´ôü¶Ž›Ô¶×ËoÇ;ûÜ555úö·¿­gžy¦CúÜÞ×®©'Ÿ|RãÆkµo@[UUU¹V²[¿~½zè!ÝvÛmJKKÓÀÕëÿ±wçÑqÕ÷ýÿŸ£]ÞÇ–-k$Kòx,"aq¡È¦)KH9…„°}[ªBOÛ@“†´!IÍ· ô|C¾I€4 ´$$à†m(PL/)lÃXûbawm–ôûÏ 
‘F’7i´<÷ptïϽú|îÈ3ƒôyÝ÷Œ,[¶ŒsÎ9‡«¯¾š{h4J0¤ªªŠo}ë[<õÔS¼ñÆìß¿Ÿ;wòë_ÿšû·ãÎ;ïäÖ[oåÿ𩬬$ ¦{¸’$I’$I’$Iã.Ð?Ú_%I’$iœÝsÏ=TWW§»ÓVWWMMMD£Qš››iii!&¶kkk9pàÙÙÙ …‡Ã„ÃaŠŠŠÛ+V¬`Μ9i‘&ƒÙ³gSWWÇüùóû¶mÛF8NšÀRNh¾êª«øà?ÈÍ7ßœ´ÿŽ;îà7¿ù ?þñ“ÎñÀ$&HlûÛßþ–{ï½÷¨Ûî[ à¿øÿøÇû6oÞÌïþîïÒÔÔ”ØwõÕWsúé§éÿ<À§>õ©!ç=餓øüç?Ïg>óàØòj` IDAT¯_*±XŒåË—§¬ºðÌ3Ï$ªA 6Ò÷Hç5­ïW]u'Ÿ|2·ÜrKÒþþð‡¼öÚkÜyç€×z<¯õ±Žc¤ãäº 6Òóu$ß àÆod×®]üë¿þë¨ç‹ç ¾¾žçž{Ž»îº‹mÛ¶ñÒK/±lÙ²®€ô¾X,–øœ8ø³ãÛo¿ÍîÝ»mƒÁàÏŠ·—.]š²J¤É¯¦¦x¯²$I’$I’$éè|ðA.¿üòäýB$I’$M4B&¾“ÿ‡F¢Ñ(±X,Ñ6 ;ñ/S^^NF†,§»uëÖqàÀ¾üå/SUUEVVVÊvÃM®^¼x1/¿ü2eeeIû·mÛÆÙgŸMKKKÒ9Þ}÷],X0¤íš5khnn>ê¶©&qïܹ3é®å]]]äççÓ××7jÿwìØAAAAÒykkkYºt)µµµ‰öÇzý†3ܘöïßÏŒ3Žø{¤óÖ÷áÎßÖÖÆyçǦM›¯õx^ëcÇHÇK äHÎ;Òù~úÓŸrûí·óÊ+¯0kÖ¬QÏs¼Ÿ»ç.++ã²Ë.ã _ø‹/eôš.:;;innö3_}}}¢Z\NN ,Hú¼7ð3ßÊ•+™={všG$)] „H’$I’$IÒ±3"I’$iÒ02ùutt ©,2p»®®ŽÞÞ^འ„%%%CB#ñI„åååÌœ93Í#ÒXÛ½{7_ýêWùå/I{{;øÀ¸ì²Ë¸ñÆÉÎÎN´nruVVû÷ï'777igg'³gϦ§§gÔsŒEÛᎼ¸þ§jûÏÿüÏÜ}÷ÝI“æõúµ··óÅ/~‘ÇœÖÖÖÄ¿Oàˆ+MHa<®Ñh}ËÎÎNLl,???QÉk=~×úXÇq¤Ç§zìXŸ¯á¾×[o½Å¹çžËÓO?ÍêÕ«ú<Çòܶ_S_<à›*ÜÛÜÜLkkkâg#//oذG8¦¬¬ŒÌÌÌ4HÒDe D’$I’$I’ŽÝpÔ·0”$I’$éäçç'& ¦ÒÓÓC{{{ÊÐH$aëÖ­ìÙ³'Ñ> ™x8p2bQQ@`¼†§10wî\î¸ãî¸ãÞ}÷]ž~úi¾ýíoóüóÏóÐCz|AA---”——'íoii¡  `Hû¶¶6 ‡´]¸pá1µ=Z´¶¶¹«kkë¶¿úÕ¯øýßÿý¤}Çzý®¾újN8á6lØ@III¢êÅDúwu¬×èpοiÓ¦¤ ©x­“嵞ÆâùÚ¿?—]vßüæ7‡„AŽÕ‘Ë'>ñ‰¤ýÏ?ÿ<7ß|3¯¾ú*àµh¬¯õX9’ëv<Ÿ¯¸n¸3Î8ƒë®».©OÇ£JÇá>wYdrJõ9iàöHŸ“*++ýœ$I’$I’$I’4EøWI’$IÒ„  ƒTTT¤|¼««‹¦¦¦”¡‘šššï|=88²|ùræÎ;^CÓ0®¿þz¾õ­o±|ùrvíÚÅwÞ9$ÌQ\\Ì‹/¾Èé§ŸÎ3Ï<ÃÿñÓÐÐÀßüÍßpÞyç1gÎ.¹ä<òwÜqÏ>ûìïõ½ï}¬¬,.¸àúúúxä‘G¸óÎ;yî¹çŽ©íÑúÛ¿ý[>úÑ2{öl.¾øb222xòÉ'ùþ÷¿ŸÔîÙgŸ%33“sÎ9gÈ9Žåú}öÙÜrË-|éK_" Q__Ï×¾öµ£ËpßãXk4Úù/¹äz{{9ï¼óÈÉÉá¹çžã†nà»ßýnR[¯õ{ÆãZ…#¹nÇóù‚÷^O^{í5^~ùå£>ÇH÷¹‹[³f @€ 6ŒIttFª¤FG¬¤¯ða%5I’$I’$I’¤éÁ@ˆ$I’$iRÊÍÍMLvN,2‰2‰DûâÞ=;Uh¤¬¬ŒÌÌÌñÚ´‰D¸ûî»9÷ÜsÙ»w/ÅÅÅ\zé¥Üwß}Iíî¸ã>ýéOÓÔÔDiiibòøŠ+xì±Ç¸å–[ø³?û3Î9ç{ì1–/_>äûýä'?ᦛnâ†n ¯¯|ä#<þøãCª<nÛøDÛwùOµo¸ýáp˜Ç{Œ/|á |þóŸ'##ƒsÏ=—û+W&ŽýÕ¯~EUUÙÙÙÇõúÝÿýüå_þ%gœq»víbåÊ•ÜvÛmÜÿýÃŽ RWî{¤ûÖ÷òòr~ö³ŸñÅ/~‘믿žƒ²jÕ*îºë.>þñ{­Óp­uõ=’ëv4Ï×Hûn¾ùfººº˜1c©Œ×s×××GFFFʾhìtvvÒÜÜœ2ìF©¯¯çàÁƒäääPRR’øLRUUEuuubû„N`Ö¬Yi‘$I’$I’$I’Ò%Пê¯É’$I’”F÷ÜsÕÕÕé#¹`ÁB¡å+Ê U„X¸r!Kç-å¤'9!s<©úxµ 7nä 
/¤¶¶x/ørë­·rýõ×§­O×hüx­'¯ÁÏÆV<šê³Åà@j0L  ¤–——Ú‘4éÕÔÔPYY™æžH’$I’$IÒäxðÁ¹üòË“ö[!D’$I’4mõçõ“Σ(\DyQDŒ«XEìÐÒÖÝFsG3Û{¶#Æ[9oñêìWé | øÿÞ[ ƒC&rÜ^ºtiâŽîÒ`@€oûÛ\sÍ5̘1ƒ7ß|“Ï}îs|ö³ŸM´Ùºuk{˜~^£ñ㵞¼ç¹ÓÑëêꢩ©iذGmm-H´6‡ÃTUU%}FX¾|9sçÎMãh$I’$I’$I’4Ù‘$I’$MZÝt'‚Gºìgÿóe“Mpð’ä䜓‡î?´,üÊB:®ïHLOF£D"jkkéëë 77—ââ┡‘p8Ì’%KÈÎÎïË8­Ä9‡SùãHÚëׯçŸþéŸøò—¿LFF+V¬à¦›nâÚk¯óï=YxÆ×zòò¹;6±XlHÐc`øcàûz^^^Ò{ùš5k’¶KKKÉÊòWð’$I’$I’$I;þñ˜Ñ I’$IGàž{º:ÝÝÐ8ê ã¨B­´ÒÏÐÿ­Í#oØÇHËb“AÆq[ww7)ï"ÞÒÒÂæÍ›Ù·o_¢ýÀ;‰,[¶Œyóæ×þI’4]ôôôÐÞÞ>lØcË–-ìÝ»7Ñ~¤÷äø¶$it555TVV¦¹'’$I’$I’4y|ðA.¿üò¤ýÞžL’$I’t\tÒÉNv²ƒì<´ì´Ä÷ uàÀså“2°QN9§rêˆ¡Ž™ÌLÃ臗“““˜<:œX,–4!5>Aõ…^ ¹¹™ÖÖÖD…ŠÁw#¶bÅ æÌ™“æI’$I’$I’$IcÏ@ˆ$I’$MÃ;F y¼Ë»ôÐ3ä\yä%7‚ ¶bG1ÅÌc^F­#1Z•‘ŽŽŽ!“kãUG"‘uuuôö¾ÊÍÍ¥¸¸xØÐÈÒ¥K™1cÆxO’4ÉŃ‹ƒÃñímÛ¶Ñßß¼„øþ³fÍš¤í²²2233Ó<"I’$I’$I’$)ý „H’$IÒ8릛ìàÝCKm¼Ë»Iû¶³=±¾ƒtÑ•tŽæ3Ÿ,Húz:§ÙW@Ab}6³Ó4j¥[~~þˆUFzzzhooO‰D"lÙ²…½{÷&ÚƒÁ!•EoK’¦‡îîn‡ {lÞ¼™}ûö%Ú|©¬¬> ƒi$I’$I’$I’4y‘$I’¤c´›ÝCƒƒ÷íaOÒñdP0`YÀV°‚³9› R?æ3Ÿ4XSQvv6¡PˆP(4l•‘X,6d²o4¥¦¦†õë×S[[K__0ôîîƒC#¥¥¥deùk I𠆫2ß—ªÊTüµ¿¢¢‚êêj«LI’$I’$I’$IcÀ™’$I’4HŒÍ4°´Ð’r_#tÓt|y,!B|ÙWDA‚,bYþï™&`0H0¤¢¢"åã]]]455 ™(FÙ°aµµµ8p é|ï?04²bÅ æÌ™3^C“¤i-‹ öxûí·Ù½{w¢müµ»¨¨ˆp8LUUUÒkøÒ¥K  ­J’$I’$I’$IãÁG’$I’¦´:F u Ü×F}ô%#¼¤G aÂCö RL1ó˜—¦ÑJé•›››w gà¤ãŽ#‘Hb;. 
©,2p»¼¼œŒŒŒñš$MZ477§|íF£Ô××sðàArrrX°`Aâu¶ªªŠêêêÄkïÊ•+™={všG$I’$I’$I’$)Î@ˆ$I’¤Ie{h¡…öCËÀõvÚÙÎvv°ƒw-=ô$?“™PÀ"Qphù²oà"éø ƒTVVRYY™òñ‘&.G"‘!—KJJ†„FâÁ‘òòrfΜ9žÃ“¤qÚ zÄ÷µ¶¶Òßß@^^^ÒëeUUUÒkhYY™™™i‘$I’$I’$I’¤Ãe D’$IRZõÑ—s´ÑF+­<ý;OSK-­´²ýÐÒJ+í´ÓIgÒñƒƒ'q°€‰}…&ö哟¦‘J:yyy#Véé顽½=iÒs|t$aëÖ­ìÙ³'Ñ> ©,204RTTD ¯áIÒéî±qذG]]û÷ïO´¿æ¥ {,[¶Œyó¬b&I’$I’$I’$M%B$I’$w]t±ƒĈÑB Í4»ÞN;9˜tüŒÓfPN9A‚„qg%Ö‹(J¬—PB9i¥¤tÈÎÎ&  …†­2‹ÅRNžÞ¸q#‘H„ÚÚZúúú€ä»å§ ”––’•å¯O$T¯W·Gz½ª¬¬ôõJ’$I’$I’$Išæü ¡$I’¤ÃÒAÇa‹, )¤”RŠ(b«XËZB„XŠ(b KÈ'?£’$iòÉÏϱÊHOOííí)C#‘H„-[¶°wïÞDû`08¤²Èàm¥Gww7Æ=6oÞ̾}ûí>—•••CB@Á`0£‘$I’$I’$I’¤©Ï@ˆ$I’Æ]ÔSOã¡eðz ìfw¢ý fPF!BSÌ)œ’xÄ÷-f±U=$IJƒììlB¡¡PhØ*#±XlHÈ RSSÃúõë©­­¥¯¯ZUbph¤´´”¬,¥u4†«öß—ªÚKü9¨¨¨ ºº:±‡ÉÏ7h+I’$I’$I’$Iéä_Ï%I’t\uÓÍ»¼K -D‰ÒLsb=¾ÝJ+ýôK.ó™OˆaÂ\È…Q”Ø.:´¤yd’$éhƒA‚Á )ïêꢩ©iH@!²aÃjkk9pà@ÒùV¢Y¾|9sçί¡M(±XlذÇÛo¿ÍîÝïnã×0ªJº–K—.%ðó—$I’$I’$I’$MdB$I’tØúé§…ꨣþÐÒH#uÔÑH#M4ÑJk¢}9SL %”RJU‰õ’CË"¥qD’$i"ÈÍÍM„;†30ì00è‰DÛqÁ`pHe‘ÛåååddL®Êb477§¼Ñh”úúz<@NN ,HŒ·ªªŠêêêÄ5X¹r%³gÏNóˆ$I’$I’$I’$IÇÊ@ˆ$I’:éL=âK-µ‰õ覀L2)¢ˆ2Ê(¡„òQ–ZJ(a KXÌb+{H’¤ã" RYYIeeeÊÇG LD"‘!‰’’’aC#'œp³fÍÏá%/ƒƒñ}­­­ô÷¿Wa-///©ßUUUIc)++#33s\û/I’$I’$I’$IB$I’¦‘:h¡…耥™æÄ¾Zj飀\r)¦˜0aJ(áC|ˆð€e KÈ&;Í#’$IzO^^ÞˆUFzzzhooO [ÄÑH„­[·²gÏžDû`08¤²ÈÀàHQQÀá_»»»ill6ìQWWÇþýû‡|ïTaeË–1oÞ¼c»X’$I’$I’$I’¤)Á@ˆ$IÒÑO?-´°íÐ2°ÊGÝ¡e?ïO4,¤ÒCËïð;\ÄE”Q–ØW@AG#I’t|egg ……BÃVioo§¾¾ž††êêꨫ«£¾¾žßüæ7üò—¿¤­­-ÑvÖ¬Y”••QVVÆ’%K(--¥¤¤„X,–8.þßpÇ-[¶ŒóÎ;ÒÒRÊËË)--% ‘•å¯ì$I’$I’$I’$I£ó¯Ë’$I“HŒXÊÊQ¢¼Å[‰ÀG9,`!B„ s1&LE„±’•ÌfvšG#I’4±,\¸… éì줮®Ž†††DØ£¶¶–-[¶‰D8£¾žÍÁ Ùá0¥¥¥|ä#I {”––2þüq•$I’$I’$I’$iª2"I’4tБò¸le+{Ø“h$HøÐREÕT'¶Ë(#“Ì4ŽD’$iêÉËËã„Nà„NHÝ €»î‚Ë/ߎI’$I’$I’$I’¦%!’$Iãhû¨¥–m‡–øzüënv @ˆK-¿ÏïSN9KYJ9å,a Y~”“$I’$I’$I’$I’$iÚr¡$IÒq#–²ÂG”(ÛØF?ý@r… ¸€"Š"L˜9‘™ÌLóH$I’$I’$I’$I’$IÒDe D’$éuÓM-µD‰òï$¾Æ×p€<òXÊR–±ŒU¬âb.&L8Qõc3Ò<I’$I’$I’$I’$I’4Y‘$IJ¡“Nšif#ÙĦ¤*uÔÑK/\åãB.L¬‡ SF™d¦y$’$I’$I’$I’$I’$i*2"I’¦­ì`ë¡e [x›·U>v°€L2Y„YÆ2ª¨J¬‡ 3yi…$I’$I’$I’$I’$IšŽ „H’¤)m{¡xð#¾¾“ä’Ë2–±‚œÃ9\˵,;´”QF9i…$I’$I’$I’$I’$IR2!’$iÒ릛F‰e#ÙÄ&¢‡–ml£Ÿ~²È¢”R„9…SøŸ L˜U¬âDN$“ÌtC’$I’$I’$I’$I’$é°‘$I“B7ݼÃ;I>Þæm¶²•Fé§Ÿ 2(¥”¬`%+¹ YÉJV°‚rÊÉ&;ÝÃ$I’$I’$I’$I’$I:. 
„H’¤ %F,QÝ#^íc#ÙÂr€ ÁDuu¬#|h9‘™ÉÌ4@’$I’$I’$I’$I’$iì‘$Iã®—^j©e3›y“7ÙÌfÞâ-ÞäMÞå]f0ƒ-Wp'rb¢Ú‡¡I’$I’$I’$I’$I’4Ý‘$Ic¦‹.Þæm6±)©âÇ›¼ÉïUûXÅ**¨àB.L¬—SNi$I’$I’$I’$I’$IÒÄd D’$³vÚy×U>Þâ-6³™È!‡å,çDNdëø<Ÿç$NâN`sÒÜ{I’$I’$I’$I’$I’¤ÉÇ@ˆ$I:l;Ø‘¨òñ:¯ó&oò:¯ó.ï0ŸùœÈ‰œÄI¬c'Z–²”,?vH’$I’$I’$I’$I’$7ÎÌ”$ICìf7oóv"üÿ% À\沜å¬bp«XE„ §¹ç’$I’$I’$I’$I’$IÓƒI’¦±=ìa+[‡?¶±~ú™ÃV°‚U¬¢šêDðc)K Hw÷%I’$I’$I’$I’$I’¦-!’$M=ô°‰Mü–ßòú¡åMÞ¤Ž:f2““8‰Õ¬æn`5«YÅ*Ê(KsÏ%I’$I’$I’$I’$I’”ŠI’¦˜ílçµCËo-›ØD=ä’KÅ¡å£|4±^N¹?$I’$I’$I’$I’$I’&!’$MR9H=õld#5‡–Ml"J€ AV±Šs9—?çÏ© ‚Õ¬&—Ü4÷\’$I’$I’$I’$I’$IÇÊ@ˆ$I“À.vño$BÙÈ«¼Jd‘ÅJVRAWs5•Tr:§SDQº»-I’$I’$I’$I’$I’¤1b D’¤ fÛ?^ã5~Ëoi¢ €E,âdNæ,ÎâOùSNædV±Šl²ÓÜñ6• IDATkI’$I’$I’$I’$I’$'!’$¥Q”h"ü_bÄÈ$“9‘S8…ÏóyNáNæd«~H’$I’$I’$I’$I’$ 0"IÒ¸i¦9)øñ2/ÓN;™dr'PI%ÃßPI%§r*3™™î.K’$I’$I’$I’$I’$i‚2"IÒ8œðÇ_ó׆?$I’$I’$I’$I’$I’tT „H’tŒê©ç%^â×üšjx•WÙÅ.²Èâ$N¢’J¾ÂW¨¤’S8…ÌHw—%I’$I’$I’$I’$I’4É‘$éìg?5ÔðÒ€¥…²Èb«¨¤’?àá|òÓÝeI’$I’$I’$I’$I’$MAB$IA3ͼÀ l`5Ôð ¯ÐM7‹YÌéœN5ÕTRÉ9œÃ<楻»’$I’$I’$I’$I’$Iš&2ÒÝI’&нìeø&ßäb.f! )¦˜Oói^à*©äû|Ÿ7xƒZx”Gù[þ–‹¹Ø0ˆ$I’¤„@ @oo/_ÿú×)//'77—•+Wr÷Ýwiûè£ræ™g2sæLfΜəgžÉüǤ¡×’$I’$I’$I’¤ÉÆ !’¤i©—^Þà ^äE^æe^â%6³™~úYÆ2ÎäL¾ÂW8ƒ38•SÉ&;Ý]–$I’4‰Üxã‰D(**â׿þ5ôGÄÂ… ¹âŠ+x饗¸îºë¸ë®»X»v-ýýý<ùä“\}õÕ<ñÄ|ðƒLó($I’$I’$I’$I™IÒ´pƒ¼Ækl`/ð"Ĉ1‹YœÂ)¬c·s;çr.‹X”îîJ’$Išä ¹ýöÛÛçž{.wÝu·ß~{"òÍo~“¯}ík\y啉vW]u»víâïÿþïùÙÏ~6îý–$I’$I’$I’$MB$ISÒð/ñ_‡–—x‰:â#|„¯óuÎá*¨ ƒŒtwW’$IÒsÝu× ÙwÖYg±eË–Äö«¯¾Êw¾ó!í.¹äþñÿq,»'I’$I’$I’$Iš „H’¦„}ìã%^JTyžç颋"Šø0æÛ|›5¬a«Hww%I’$MqåååCöƒAb±Xb»­­E‹†V(,,,¤µµu,»'I’$I’$I’$Iš „H’&¥ílçe^æ^`øü?zè!L˜5¬á;|‡u¬£œòtwU’$IÒ4”‘1z%ÂÂÂB¶oßÎ’%K’öo߾ŋU×$I’$I’$I’$IS„IÒ¤#ÆòŸDˆðÏñ&o’I&àœÃ9ü%ɇù0¤»«’$I’tXN;í4}ôQn¼ñƤý<ò§vZšz%I’$I’$I’$Iš, „H’&¤nºy‘yЧˆá×ü€Ó9K¹”oñ-Ö°†9ÌIsO%I’$éè|á _ࢋ.bÞ¼y¬]»€§žzН|å+<öØciî$I’$I’$I’$i¢3"Iš0¢D‰ZžäIv³›0aª¨â/ø Îç|æ3?ÝÝ”$I’¤ãâì³Ïæ‡?ü!÷wÇõ×_ÀêÕ«ùÑ~ć>ô¡4÷N’$I’$I’$I’4Ñ‘$¥Ív¶óÏ!ÂcÎÇ™ËÜtwQ’$I’$I’$I’$I’$Išv „H’†H¹ˆ‹¸•[ù=~l²ÓÝEI’$I’$I’$I’$I’$iZ3"I và~ÂOx™—YÀ.åRnã6Îç|²|Ë$I’$I’$I’$I’$I’& g÷JÒ4ÖA¿ä—ü ÿ“ßg;Û9Ÿóù1?æR.%—ÜtwO’$I’&¿§ž‚‡JÞWP÷ÝO?ýþ¾¥Ká¯þj|û&I’$I’$I’$Iš 
„HÒò_üßå»<ÌÃÌg>×ZÊ)Ow×$I’$ijyüq¸çÈÎNÞÿÔSï¯÷ö¢EB$I’$I’$I’$Ic"#Ý$›:ø>ßçNá\Î¥Ž:~Ĩ£Ž¯óuà ’$I’4®¸â½¯==Ãÿ—•W_Þ~J’$I’$I’$I’¦,+„HÒ$µƒü3ÿÌÝÜÍöpWð~À霞î®I’$IÒÔ÷¡AYÔÕ ß¦»>õ©ñë“$I’$I’$I’$iZ±Bˆ$M2µÔr37SFÿÈ?òI>É;¼Ã½ÜkD’$I’ÆÓ5×@vöð‡Ãpê©ã×I’$I’$I’$IÒ´b D’&‰7y“+¹’å,çá|ƒ&š¸“; Jw÷$I’$iú¹òJèéIýXN\{íøöG’$I’$I’$I’4­‘¤ n [¸Š«XÍj~Ëo¹ŸûÙÊVnæff1+ÝÝ“$I’¤éëÄ¡¢¡uwÃ'?9þ}’$I’$I’$I’$MB$i‚ª¥–?åO© ‚jø?â5^ãJ®$‹¬twO’$I’pÍ5™™¼/€SN•+ÓÓ'I’$I’$I’$IÒ´` D’&˜ì䝸+NäDžâ)¾Ëwy×¹†kÈ$sôH’$I’ÆÏ§>½½Éû23áÚkÓÓI’$I’$I’$IÒ´a D’&ˆnºùßb9˹û¸‹»ØÂª©¶"ˆ$I’$MTK–À™gBÆ€_³õöÂå—§¯O’$I’$I’$I’¤iÁ@ˆ$Mó0«XÅmÜÆgø [ØÂŸð'A$I’$i2¸újÞ[ÏÈ€Š‹ÓÛ'I’$I’$I’$IÒ”g D’Ò¨‘F>ÎǹŒË8ƒ3x‹·øß`6³ÓÝ5I’$IÒáúÃ?|=€k®I__$I’$I’$I’$IÓ†IJƒ~ú¹‡{¨ ‚läIžä'ü„RJÓÝ5I’$IÒ‘*(€ªª÷ üÁ¤»G’$I’$I’$I’¤iÀ@ˆ$³Ílæ,Îâs|Ž›¸‰7xƒ*ªÒÝ-I’$IÒ±¸ê*èï‡ .€ùóÓÝI’$I’$I’$IÒ4•îHÒtòù¿üÆIœÄ«¼ÊjV§»K’$I’¤twwÓÞÞN[[mmm´··³}ûvZZZhoo§½½––:¶oçeàæÿú/^YµŠ… ²xñb Y¸p!‹-bñâÅIë³fÍJ÷ð$I’$I’$I’$I“˜I»ÙÍgø ð7qÿÀ?CNº»%I’$IÓÒÞ½{‡:¶oßN{{;­­­‰õ¶¶6b±XÒ±ùùù,\¸¢¢¢Ä×SO=•E‹ñø¼yœqàáCçmmmåþçë»vír®A‘øù-Z””Ä×.\HF†Å~%I’$I’$I’$Iï3"IcìU^åR.¥Ÿ~žæiÎã¼twI’$I’¦œX,Fss3±XŒ–––ÄúàíÆÆFöìÙ“tl^^¡Pˆ¢¢"‚Á åååœ}öى펪]]]ìØ±#e¿âë[¶l¡¥¥…††zzzFìçpë%%%Ì;÷¨¯¥$I’$I’$I’$ir0"Icè!âZ®åÃ|˜ŸòSæ3?Ý]’$I’¤I¡³³“;wœ¸ÞÞÞÎÁƒ“އ7âa‰ÊÊÊ”!Š’’rrƧ‚cnn.¡PˆP(tXí;::†{,#²aÆÄcååå¸]XXHffæX Y’$I’$I’$I’4† „HÒè§ŸÿÍÿæK|‰ë¹ž»¹›l²ÓÝ-I’$IJ«‘©ªz ”*àPQQ‘2ì°xñb222Ò4Êã'??Ÿp8L8µíáh6mÚtØšáÖÇ3@#I’$I’$I’$I™I:Îúèãnà^îå»|—¸!Ý]’$I’¤1ÑÕÕÅŽ;† t %444ÐÓÓ“tüÀÂH’’æÎ›¦QNyyy‰ê#£¶Åb#>_555Äb1šššØ½{÷ï5Zp$¾^TTD «aK’$I’$I’$IÒ´f D’Ž£>úøþ„Ÿð~ÎϹˆ‹ÒÝ%I’$I:"#V™¸ÞÖÖF___âØxP``( § ,Z´ˆ¬,5•.ñçépŒö31°úÈàŸ‰ÜÜ\æÏŸ?bh$¾½dɲ³­®)I’$I’$I’$I‡Ë¿ºKÒqÒK/ÌóS~Ê¿óï†A$I’$M«AŒ4©¿¡¡½{÷&¯<Ÿ¼‡Y³fÍàG|]SO~~>ùùù„B!*++Gm?ÚÏ[4eýúõ466ÒÝÝtìàŸ·áÖ‹‹‹™7oÞX Y’$I’$I’$I’&!’tœÜÊ­<ȃ<Â#\ÀéîŽ$I’¤),Uņá&ßoß¾ÞÞÞı+6Ä'Ù‡Ãá”“ï­Ø £q¤ÕGFú9ŽF£lذX,Fkk+ýýý‰cSU¤n½°°ÌÌ̱²$I’$I’$I’$¥…I:îç~îàþ…1 "I’$é¨ ž?Üzss3»víJ:6>1~àDøŠŠŠ”ä/^LFFFšF)%ËÏÏ'‡GmÛÙÙÉÎ;Gü÷¯>R__ÏÁƒ“Žƒ£V ƒ”––2{öì±²$I’$I’$I’$7B$é½È‹TSÍ­ÜÊ•\™îîH’$Iš ºººØ±cLj“×ãÛ 
ôôô$?xòzeeeÊjK–,aΜ9i¥4~òòò…B„B!***Fm?ZȪ¦¦æˆBVí²’$I’$I’$I’”.B$éìg?ŸæÓTQÅ7øFº»#I’$iŒuttŒð¸ÞÚÚJâØøó“ÉÃápÊIæ………dff¦q¤Òäw$ÕGRýÛ¼½iÓ&š››Ù¾};½½½‰csss™?~ÊÀÖàõ%K–=–Ö$I’$I’$I’4‘¤cp·±“|ï‘w•$I’&£x…€Ñ‚tww'¯XŸð‡Y³fMʉàÁ`0M#”4šüü|òóóÕxF3ÚëFMMÍa¿n ·ß–$I’$I’$I’¤á‘¤£ô ¯ð¾Ã=ÜC1ÅéîŽ$I’¤CF«â1p{¤;ý¬âáþ% lŽÑ^“¢Ñ(6l8ìÊBí/Z´ˆ¬,Ý+I’$I’$I’$M'þ…P’ŽÒ_ó׬a ÿ‹ÿ•î®H’$IS^GGLjÕ;âëMMMìÞ½;騸„ê¨+**RN¬.**"¤i”’¦¢#©>ÒÕÕÅŽ;F ²E£QÖ¯_OCC===IÇÇ_ÇF ‘,Y²„9sæŒå°%I’$I’$I’$!’t^çu"DxœÇ àd1I’$éHuvv²sçÎÃñõööv<˜tüÀIÏñIÖ©&?—””“““¦QJÒ‘ÉÍÍ%  …¨¨¨µýha¹7²aÃZZZˆÅbIǦ Ë ·¾xñb222ÆjØ’$I’$I’$I’Ž’I: ÿÀ?°šÕ¬emº»"I’$M#ML¼ÝÚÚJâØT“ÃápÊÉÉ………dff¦q¤Òø[»v-O=õTº»1¥L…kšŸŸO8&Ú¶££#åëñÀõM›6ÑÜÜÌöíÛéííM:~po¸uƒx’$I’$I’$IÒø1"IG¨“Nâ!îäN«ƒH’$iÊ‹Åb#V766ÒÝÝtl^^ހǚ5k†L ...fÞ¼yiá{÷?Ûoݺ•åË—'¶ÿâ/þ‚;wò£ý(±/‰°víûñá–t‰a¬û&Äx§›¾¾¾twa\ÉÏñƹ馛xå•WØ·oßa7øšŽÅ¿¡TçüÏÿüOîºë.ž{î9z{{Y±bŸûÜç¸öÚk“^‹Ž·üü|òóó»úÈp¯ÿñíššb±ìÙ³'éØÁ¯ÿ#…HŠŠŠÆtÜ’$I’$I’$IÒTf D’ŽÐó™'žx‚Õ«W³uëV>ûÙÏÒÐÐÀm·Ýv\¿ÿ±ˆ¿ÞŽÃyo‰Hç½e¸õÉöÞ"I’$I’$I’$5ÿz&IGè)ž¢‚ J(IwW$I’$`ô»¸Ç·ç.îEEETVVN»»¸gggSUUÅO<ÁUW]•ØÿÚk¯±lÙ2þÿìÝy|Tõ½ÿñ×B«A–$„mX´q WEÐJ ¶Š€KÁjmµÈE4«¸ÕÔÚêíÏ…Bëk«tm·[Ü®µ”°„=„€ $!¿?r37CV á$áõô1Ïœ9ßs>ß™3ÉyϧS'Þ}÷]† yìµ×^cذa´hÑ"ˆ’¥ãÞÇÌe—]F³fÍ>|x£ê\ó‡?ü0pà@ž~úi† Ò !‡£|÷‘´´´j·Ý¿?¹¹¹•~N•-gee±hÑ"²³³),,ŒèçVUË)))´oß¾>§-I’$I’$I’$.&è$©±ùš7”$I’ŽPAA›7ofùòåddd0gÎf̘ÁwÞÉ„ 5jƒ "99™ØØX:tèÀ€8÷Üs7n÷Þ{/Ï?ÿ<™™™¤¥¥1vìX~ùË_²`Á–,Yš5kØ¿?ùùù¬Y³†¥K—²páBæÌ™Ãƒ>Hzz:&L`øðᤦ¦’œœÜdà eFŽÉ+¯¼µî…^`äÈ‘Œ=š D=öÊ+¯pñÅGî/X°€!C†Ð²eKzöìÉ-·Üž={¢ÆìÞ½›[n¹…p8LË–-IJJâÚk¯åþçj¬ï“O>á‚ . 
uëÖ´k׎ /¼×^{­Ú1¡P¨Ò×­²õµ©­lLÙøP(Äõ×_yüÓO?eäÈ‘´mÛ–¶mÛòío›O?ý´Òc¯Y³†Ë.»Œ„„„*ë<œ1µ9vmŸÇO?ý”‹.º(²¯‹.º(j_åç¿xñbž}öÙ¨õuùœTöü”­ËÎÎf̘1´mÛ–.]ºpõÕW“››[aÞË—/碋.¢M›6´k׎o}ë[¬X±¢Æç¾LmÎ©}ûö\zé¥lذ¡ÆZÊŽUTTDóæÍ …BÜyçGTkmÔöܪí|JJJ"a2'žx"û÷ï¯U=]\\ÉÉɤ¦¦2tèPÆŽKzz:Ó§Oç‰'ž`áÂ…,]º”5kÖpàÀöíÛÇš5kX²d ,`ÆŒŒ?ž´´4ÈÊÊâùçŸç¡‡bܸqœ{î¹ 0€N8!R4h£Fb„ Üyç̘1ƒ9sæ°páB233Ù¼y3ÅÅÅA?5’$I’$I’$IÒa •4¦¯Î“¤àk|+¹’ŸòÓ K‘$©Éš={6'N º ©NåççWú-èUuõ(¯eË–Õ~ zùåÄÄDbbüþ‡#±}ûvN9å¶lÙyO;í4^zé%öïßÏÈ‘#Y¹r%PzAw×®]Y¶l‰‰‰@éæO<ñW_}5_~ù%wÜq¡Pˆ§Ÿ~:rŒK.¹„ÓO?I“&q '°bÅ nºé&Þ~ûíj»¬^½šÑ£G3kÖ,Î9çÖ­[ǤI“X²dIÔ¸P(Ta?•­«l}mk«j_|ñçž{.Ó¦MãòË/'&&†W^y…iÓ¦ñæ›o’’’µ#F0}útN?ýt/^ÌE]TísPݘÚ»6Ïã_|Á°aø÷Þ{#Ÿ 0}út/^LŸ>}(..椓Nâƒ> !!!Rã–-[0`6l uëÖuúœTõÚ^pÁÜrË- :”¼¼<ÒÓÓiß¾=¿ÿýï#Û­Y³†aÆ1}út.¾øbš5kÆë¯¿Î<À§Ÿ~Z«Îµ9¿¦¦˜˜^}õU~ó›ßðî»ïÖº¦ªÎíÚÖZÓû¥¶¯áÑÎçÖ[oeÕªU,Z´¨Æy«jìܹ³ÚÏٲ圜ŠŠŠ¢Æ—uêé37%%ÅîP’tÊÕ%I’$I’$IG& 1wî\ƽÞ@ˆ$ž9‘x€IL ºI’š,!j öïßOnnn•ŽòžfggSXX5¾üE§Õ]xš’’Bûöíšåñ笳ÎbæÌ™œyæ™deeqÙe—ññÇpÊ)§0oÞ<úöíKff&“&Mâƒ>¨r_yyyôéÓ'ª3BÛ¶mY¿~=:tˆ¬[»v-áp¸Ú Ưºê*.¼ðBÆY·jÕ*N>ùä: „Ô¶¶ªöwõÕW3pà@n¿ýö¨õ¿ûÝïøä“O˜1cFÔ>þùÏrþùçW9çÊê­jLm]›çñꫯæ?þã?HOOÚ×#<ÂG}Äþðî¾ûnºvíÊ7ÞÙæÁdýúõ<þøãuþœTõÚΟ?Ÿ1cÆDÍçßø›6mЬ?~<ƒ ª0§gŸ}–+¯¼²Vá‹CUv~×EMO?ý4ßÿþ÷*r8µÖô~©íkx$óÙ°ao¾ù&3gÎdíÚµ¼÷Þ{ôîÝ»Æù¨îäååUûù]¶¼iÓ&vïÞ5öpšIIIM¾Ë–$UÇ@ˆ$I’$I’$=!’TGbˆá/ü…+¸"èR$Ij² „((ùùùµúVñ¼¼<¶mÛÆÁƒ#cË. 
­ÍÅ¡;w¦yóæÎTU¹ï¾û())aÚ´i<üðÃäææòóŸÿ( tèÐÛn»û￟ƒ2mÚ´j÷wèÅå\pûöíãž{îaøðáµ>Y¶lÉÉɇu¼ªÖMmUí/11‘÷ߟ=zD­ß¶mÆ cÅŠQûØ»w/­Zµªv>‡·ª1µ=vmžÇªöµvíZÎ9çœHŸÏ?ÿœ+¯¼’?ü0²M¿~ýxöÙg9ãŒ3«®šæWöxe¯íÎ;£º”ìß¿Ÿøøø¨ŸOUÕ‘››KÇŽ(R_5mݺ•¤¤¤: „TWkMëçÜ:Üù„B!zôèÁå—_ÎÔ©S#݆Ô0Õôo„ò÷ý7B\\:t¨öße÷»uëFlll€3•¤ºg D’$I’$I’ŽžIª#!BÌe.ãWóÆ’$éˆQ¾þõ¯óÖ[oUùx\\ýû÷§ÿþœzê©ôë×N:Ñ¥Kiݺõ1¬Võ%33“)S¦ðî»ï2tèPzè!† À{ï½Çí·ßÎ[o½ÅàÁƒ™9s&ƒ  ''‡»îº‹W_}•­[·R\\Ùgù_½ìÞ½›{ï½—_|‘œœN;í4.¿ür&Ož\íÀÍ›7gß¾}´hÑ¢Úú&RÛÚªÚ_ll,EEE•ÖϾ}ûjÜÇáÎíp]›ç±yóæìÝ»—¸¸¸¨õ´mÛ6ªÛÏyçǬY³8p o½õ?úÑ¢"uùœÍk[Õœjs\¨ýù},kªj»£­õÐu‡snî|Žä} ÆáÀäää°}ûv¶nÝJvv6Ë—/ç£>bùòåìܹ³Ê±-[¶ä³Ï>«.’¤ÆÌ@ˆ$I’$I’$½ª!~©$I’$IÀ#û,'œpBd]ùîå÷‘MJJ ;väŠ+®`Ĉ‡:ÿüóÉÈÈ`„ ‘uŸ|ò W^y%+V¬¨vlbb"6l oß¾‘u•uémmU]Xÿ­o}‹Å‹s饗F­_²d ééé,[¶¬Ú:Fm]›çqøðáÌ›7ýèGQûzá…>|xÔº±cÇrï½÷rÏ=÷ðꫯ2kÖ¬#ª«¾]pÁÌŸ?Ÿôôô¨õ¯½öZ­Æ×öü®‹šÞxã£Úo]×ZÛ×ðHæcw†§¦Ïù²å7ràÀ¨±‡~Χ¦¦2|øðJ?óƒ‰I’$I’$I’$©é0"I’$IR-•6j#??¿Ú J³²²Xºt)yyylݺ5êÂà–-[F…Cª ‘tîÜÙî#uläÈ‘\vÙeüñ¬ðØèÑ£3f óæÍ‹ZÎ9çpûí·s÷Ýw“œœÌ† ¸ÿþû+Ýÿõ×_ÏÃ?LŸ>}صk3f̨48Ô=÷ÜÃ¥—^Jrr2ƒfÍš5üà?ছnªq>#FŒàî»ïæÑGå„Nàí·ßæ¿ÿû¿¸¶®]»òî»ï2hÐ þùÏrÝuבÍôéÓ=z4ÅÅÅ 6Œ-Zðæ›o2iÒ$~ýë_×XçѨí±kó}:çŸ>mÛ¶eÔ¨QÄÄÄð÷¿ÿ'Ÿ|²Vãçü>Òšš5kFFF?ÿùÏj¿u]km_ÃÃÏ!C…B,]ºôˆkSÍjú,.ûöíGÆÆÅÅÑ¡C‡¨ÏÜp8\éçq·n݈ p¦’$I’$I’$I:^…Jü*:I:,!BÌe.ãt)’$5Y³gÏfâĉA—!3û÷ï'77·Ú UË–³³³),,ŒŸP¡ÃHe!’””Ú·oÐ,¯¾úŠÎ;“͉'žõX~~>ÉÉÉdggÓ¦M›ÈúíÛ·sÛm·ñÚk¯±k×.úõëÇO~ò®¸â àÿ:¼ñÆÌš5‹%K–°gϺvíÊ%—\½÷Þ[!Lp¨wß}—©S§’™™I§N˜¾ñoðë_ÿšîÝ»QmÏ?ÿgžyæÕUÓü*{m«z½«Z¿|ùr¦NÊ[o½ELL _ÿú×yì±Çèׯ_ÔEð•©íù}45…B!Î9ç}ôQRSS+}ªzNÊo{4µVUgmϭÙÏàÁƒ‰‰‰áí·ß®rŽª\ù.Õ…=6mÚÄîÝ»£Æ–.kÓ±+))©ÊŽH’¤Ã“™™ PcGFI’$I’$IRÕB¡sçÎeܸèë— „HÒa2"IRý3"U/??¿Ê `]ÎËË‹{8Ã&&&Ð,%Õ·åË—3räHÖ­[t):ްsçÎ?Ï6oÞLNNEEEQãˇ"«ûlKII¡E‹ÍR’ŽoB$I’$I’$éèUiP=’$I’$éÅÇLJ ‡Ã5n›ŸŸ_ã·¨¯X±‚Í›7³}ûö ¼ÐVjB¡=ö&L U«V¬\¹’)S¦pã7]šš ê‚‹‡ÞߺukT÷”Ê‚‹áp¸ÒÏ .]ºÐ¬Y³g*I’$I’$I’$Ë@ˆ$I’$IMX||<ñññ$''“ššZãöyyyÕ^À›™™I^^7näË/¿ŒÛ²e˨ v« ‘$%% …êkÚ’±hÑ"}ôQî¹çbbbèÛ·/7Ýt×\sMÐ¥©‘¨êóáÐå7ràÀ¨±‡~>„Ãa† R᳡k×®œp ÍP’$I’$I’$Ij| „H’$I’¤ˆ² Gm”u©îÂà²É¶mÛ8xð`dlÙ7À×Ôy$))‰Î;Ó¼¹¿ÂŽÆÈ‘#9rdÐe¨©ÍÏð²åÚü ¯ª‹‡?Ã%I’$I’$I’¤úã_â$I’$IÒ)ß}$--­Úm÷ïßOnnnT·‘C/:ÎÊÊbÑ¢EdggSXX5þÐo—¯jÙo——t<«©ËSÙýÚtyJJJ"--Í.O’$I’$I’$IRf 
D’$I’$Õ»¸¸8’““INN®ÕöùùùU~S}^^YYY,]º4òXyeß\_þâåªB$]ºt¡Y³fõ1eI:jìܹ³V²mÛ6<G‡ª ”ÝïÖ­±±±õ5eI‡©¦÷~ùåCßûeÁ±òï÷p8\éÏÎ;Ó¼¹¿¢•$I’$I’$I’tdük£$I’$IRâãã‰'99™´´´·/ß}¤²‹Ç³²²X´h7näÀQcË:ÔÔy$99¹ÖIÿ§¦÷gYÈ+;;›={öD=ôý‡2dH…àGÙ²$I’$I’$I’$ B$I’$I’êÈáv9ôBôò÷³²²Xºt)yyylݺ•’’’ÈØÊ:TµÜ¥Kš5kV_S–SYªÂÛ·o§¸¸82¶|Ÿ²÷K8®ô}dI’$I’$I’$I •I’$I’¤ÄÇLJ ‡Ã5n[PPÀÎ;«ílPÖ}dÆ EOHH¨±óHBBݺu£]»võ5e©F‡¥ªZÞ¼y3»víŠ[”*^§¦¦Vz¾'&&Ð,%I’$I’$I’$©n‘$I’$IjàZ¶lIrr2ÉÉɤ¦¦Ö¸}MÕgffÖEõU-{Q½j²ÿ~rss« w”ÝÏÎΦ°°0jü¡a¦´´´J»ãf’$I’$I’$I’t<2"I’$I’ÔÄN÷‘üüü*/Ð/»¿bÅ 6oÞÌöíÛ)..ŽŒ‹‹£C‡•^ èr·n݈­Ïië94pT]ØcëÖ­”””DÆ–ŽÊŸ#áp¸Òs§K—.4kÖ,À™J’$I’$I’$IRÃf D’$I’$é8O|||¤ûBMÊ:‹T(ë>’Íž={¢Æ–u:©©óHÙ};5½®eË7näÀQc}]Ãá0C† ©ô5NHHh†’$I’$I’$I’Ôô‘$I’$IR­•6j£²î#å—³²²Xºti­;ITµÜ¹sgš7÷×\åÕôÜÈÉákŸ}Æï ؘ“Seç—ò]<ìü"I’$I’$I’$I ‹)—$I’$IR½8œî#û÷ï'77·Bp¡üý¬¬,-ZDvv6………Qã*t©,D’’’BûöíësÚõ¦|ê›6mb÷îÝQcË6eÏŰHÏÎfj«V¬=š/¯ºŠ¿öµÈó …š¥$I’$I’$I’$©¶ „H’$I’$)pqqq$''“œœLjjjÛçççWˆØ²e Ë—/géÒ¥lÙ²…¼¼¼¨±åÉ]épB’“*$&&S/s.((`çÎÕ†;Ê–srr(**Š_>Sº©, “’’B‹-*°m­œÓ~ýkxùe7¦N…ääz™¯$I’$I’$I’$©n‘$I’$IR£O8&׸íÞ½{Ù¾};[·n%'''jùÕ‹^å˃_râÔ#•×¢E :uêDçÎILLŒZîܹ3:uŠZÞ½{7Û·o'''‡mÛ¶±mÛ¶ ËeÇÿꫯ¢ŽÕ¦M›¨}%''súé§Ó©S'ºté9~§NèØ±ãÑ?‰]ºÀôép×]0w.ü×ÁÀ0dÜq\|1Ø)D’$I’$I’$I’,!’$I’$IjÒZ·nM¯^½èÕ«WÔú_ñ+>çsþȹê«(**"''‡œœ¶nÝZi¸cÅŠ‘å‚‚‚* p”…Hzöì.éÔ©IIItîÜ™øøøz}ª&”Þ–.…‡‚1c wo˜2®¿Z·¦6I’$I’$I’$IR• „H’$I’$é¸ó ¯p+·òpWEÖ7oÞœ¤¤$’’’8p`ûùòË/Ù²eK$DÒ®];ºté {ÄÄÄÔç4êÞС¥·Õ«á׿†»ï†iÓàškà¶Û [· +”$I’$I’$I’$ý/!’$I’$I:®,g9Wr%Ws5wq×Qí«]»v´k׎“N:©Žªk úõƒ3àž{àw¿ƒ™3áñÇá’KàG?‚³ÏºBI’$I’$I’$I:î5²¯(”$I’$I’ŽÜ¶pq*§òßüwÐå4|:Áw@VüéO°~=  ƒÁœ9PTt…’$I’$I’$I’tÜ2"I’$I’¤ãB>ù\Â%ÄÏ|æG\Ð%5-ZÀرðþû°d „ÃðƒÀI'ÁCÁ®]AW(I’$I’$I’$IÇ!’$I’$Ijòr«¸Š5¬a! 
I !è’¯¡Cá¹ç`Õ*¸øb¸ÿ~èÞÒÓK;ˆH’$I’$I’$I’Ž !’$I’$Ijò¦2•—y™¿òWúÒ7èrš†Þ½aÆ Ø¼¹42~içQ£ ##èê$I’$I’$I’$©É3"I’$I’¤&í)žâQå·ü–ó9?èršžvíJ»ƒde•†Bvî„#`Ð ˜3 ƒ®P’$I’$I’$I’š$!’$I’$Ij²þÎß™Ä$¦3ñŒºœ¦­Y³Òî o¿ ~ýûÃu×A0}ziPD’$I’$I’$I’Tg „H’$I’$©IZÁ ®à .çr~ÂO‚.çø’–VÚdõj˜0~õ«Ò`È 7ÀÊ•AW'I’$I’$I’$IM‚I’$I’$59;ØÁhF3€<Ã3„]Òñ©W/xðAX¿~Þ| €#`áB() ºBI’$I’$I’$Ij´ „H’$I’$©IÉ'ŸQŒ¢„æ18â‚.ImÛÂĉ°bÌŸ_ºnôh8ã ˜= ‚­O’$I’$I’$I’!!’$I’$Ij2J(á:®c5«y™—éD§ KRy110j¼þ:,[§œS¦@Ïž0}:ìØt…’$I’$I’$I’Ôh‘$I’$IR“q7wóWþÊs<ÇIœt9ªÎé§Ãœ9°aLš³fAJ L˜Ÿ~tu’$I’$I’$I’Ôà‘$I’$IR“ð{~σ<È,fñM¾t9ª­ÄÄÒî 7ÂìÙ™YÚ9dèPX¸JJ‚®P’$I’$I’$I’$!’$I’$IjôÞâ-&1‰óc&21èrt$Z¶ü¿î ¯¿ 0f œt̘ûö]¡$I’$I’$I’$5(B$I’$I’Ô¨}Æg\Â%Œa ÷q_Ðåèh…B0|xiw?†aÃà®» gO¸óÎÒN"’$I’$I’$I’$!’$I’$Ij¼rÉe4£éMožæibüuWÓ2p <ñ¬[·Þ øôî ãÆÁûï]$I’$I’$I’$Ê¿K’$I’$©Q:À¾Ãw(¤E,¢­‚.Iõ¥sg¸ãX»ž|>û Î>†…矇ââ +”$I’$I’$I’¤cÎ@ˆ$I’$I’J¸ŽëXÆ2°€.t º$ -ZÀ„ ð¯Á’%œ W^ '3fÀÞ½AW(I’$I’$I’$IÇŒI’$I’$5:Ó˜Æ\æòWþÊ)œt9 ÂСðÜs¥ÝBFŽ„ÿ¸4 ’ž6]$I’$I’$I’$Õ;!’$I’$IjTæ2—Ÿñ3f0ƒŒº­OŸÒî ›6Á}÷Á /@¯^0j¼óNÐÕI’$I’$I’$IR½1"I’$I’¤Fc)K¹†k˜ÊTþ“ÿ º5$íÛ—vY³ž}vì€!C`Ð ˜3ŠŠ‚®P’$I’$I’$I’ê”I’$I’$5 Ydq—1’‘ü‚_]ŽªØX;Þ}>üú÷‡ë®ƒ=`útÈË ºBI’$I’$I’$IªB$I’$I’Ôàíd'r!ÝéÎæ㯵Tii¥ÝAV­‚ñãaÆ èÞn¸¡t$I’$I’$I’$5bþå\’$I’$I Z!…Œe,{ÙË‹¼HkZ]’›p|6l€Ÿý þþ÷ÒÎ!£FAFFÐÕI’$I’$I’$IÒ1"I’$I’¤«„®çz>à^â%ºÒ5è’Ô˜µm éé°f ÌŸ0bœqÌž]z_’$I’$I’$I’ !’$I’$Ij°~ÆÏøÓÿþw*§]ŽšŠ˜˜Òî ¯¿~À”)ЫLŸ¹¹AW(I’$I’$I’$I52"I’$I’¤éyžgÓ˜Á 8û^zé¥J·ûä“OHII¡¨¨€7ß|“3Ï<“–-[Ò³gOžzꩨí÷ìÙÃí·ßNß¾}iÕªíÛ·gĈ,Z´¨Þç¤(- æÌõëá†`æLèÚ&L€åËI ¡Pˆââb~ö³ŸÑ³gOâââèׯ³fͪ°íÂ… 9ûì³iݺ5­[·æì³Ï®ò½!I’$I’$I’$©i3"I’$I’¤ç>àZ®%tnäFÒÓÓyì±Ç*ÝvæÌ™Lš4‰æÍ›³jÕ*¾óïpË-·°}ûvž{î9xà/^Ùþšk®¡¨¨ˆŒŒ vïÞÍÚµkIOOgæÌ™Çhvj’’J»ƒ¬_¿úUiçSN#`áB()©×ÃOž<™üü|222عs'O>ù$>ú(sçÎlóÞ{ïqíµ×róÍ7³nÝ:Ö®]Ë”)S?~<|ðA½Ö'I’$I’$I’$©á •”Ôó_2%©‰ b.sǸ K‘$©Éš={6'N º IR@ֱ޳9›38ƒ…,¤Í(,,¤W¯^¼öÚk¤¦¦F¶ÍÍÍ¥OŸ>¬ZµŠÎ;3aÂN=õTn½õÖÈ6/¾ø"³gÏŽtQhÛ¶-›6m¢]»vÇ|njD„üfÌ€—^‚¾}aòd˜8âãëôP¡PˆŸüä'Üwß}Që_~ùeî»ï>Þ{ï=.½ôRFŒÁäÉ“£¶›5kÿüç?ùÛßþV§uI’T233HKK ¸I’$I’$Ij¼B¡sçÎeܸèë—í"I’$I’¤ãK¾d£H&™çxŽf4 66–É“'WèòÛßþ–Q£FѹsgÞyçFµÍyçÇ'Ÿ|¹ß¿n»í66mÚTϳQ£Ç—vY¶ Î?î¼zö,ýŸ?×^{m…uƒfõêÕ‘ûË–-cÔ¨Q¶=ztäb[I’$I’$I’$IÇ!’$I’$Ij 
)är.g';y‘iC›¨Ç'NœÈ¼yóرcÅÅÅüæ7¿aÊ”)‘m²³³éׯ¡P(rëС[¶l‰lóì³Ï’““CŸ>}øÚ×¾Æ÷¿ÿ}^|ñEl¤«*v<ñ¬[ÿùŸðÔSÄ ð¯ÕÉ!zöìYa]BByyy‘ûÛ¶m‹„ŸÊëÒ¥ [·n­“:$I’$I’$I’$5B$I’$I’Ô ÜÌͼÃ;Ìg>ÝèVáñŽ;rÙe—ñÄOðâ‹/’˜˜È™gžÙ¦]»vlÞ¼™’’’¨[qqqd›^½zñ /°k×.ž}öYÌ<Àõ×__ÿ“TãÖ¥ LŸ7“OÂGÁ©§ÂСðüóPî<;\115ÿª¶K—.lß¾½ÂúíÛ·“˜˜xÄÇ–$I’$I’$I’Ô8‘$I’$IRàâ!f3›?ógþƒÿ¨r»ôôtüq ™9s&7ÝtSÔãÆ ãÅ_¬Õ1ãââ8õÔS™8q"¯¼ò sçÎ=ª9è8WÚäßÿ†%K !®¸N>fÌ€½{ëå°gœq .¬°~Á‚œqÆõrLI’$I’$I’$I —I’$I’$j󸛻y„GØj·0`'Ÿ|2Ó¦McåÊ•Œ7.êñiÓ¦qß}÷ñÌ3Ï››ËÞ½{yã79rdd›óÎ;?þñlܸ‘¢¢"¶nÝÊ#<ÂyçW/óS7t(,\«VÁEÁÝwC×®žÙÙuz¨©S§rÏ=÷ðç?ÿ™œœrrrøóŸÿÌOúSî¼óÎ:=–$I’$I’$I’¤†Ï@ˆ$I’$I’“I&˜Àøé¤×jLzz:¿øÅ/øáH‹-¢KMMåå—_fîܹôêÕ‹N:ñÀð£ý(²Í}÷ÝÇüùó9í´ÓhÛ¶-çž{.ÅÅÅüå/©Ó¹é8Ó·oiwuëà®»`Þ<èÝƃwß­“CœsÎ9üîw¿ã±Ç£GôèуÇ{Œßÿþ÷œyæ™ur I’$I’$I’$IG¨¤¤¤$è"$©1 b.sǸš7–$IGdöìÙLœ81è2$Iõl›8‹³À±ˆæ4¯Õ¸üü|:tèÀ_|A×®]ë¹Jé8/¾? ï¿iipóÍpÕUмvçº$IMAff&iiiW"I’$I’$IW(bîܹŒ}ý²B$I’$I’tÌíaqíiÏ\æÖ: ²k×.zè!&L˜`D [‹0v,¼÷,Yá0üàЯ<ôäå]¡$I’$I’$I’¤FÎ@ˆ$I’$I’Ž©bйЫØÎv^áÚÓ¾VãB¡]»vå_ÿú¿üå/ë¹J©  Ï=«VÁ¸qð‹_@žëÖ]$I’$I’$I’¤FÊ@ˆ$I’$I’Ž©tÒyƒ7xèN÷Z+))aï޽̛7víÚÕc…R=éÝ|6l€ûï‡_,]7jdd]$I’$I’$I’¤FÆ@ˆ$I’$I’Ž™Gy”ßð~Çï8›³ƒ.G F»v¥ÝA²²`þ|عFŒ€´4˜3 ƒ®P’$I’$I’$IR#` D’$I’$IÇÄ˼ÌT¦òñ]¾t9RðbbJ»ƒ¼ý6|ø!¤¦Âu×A÷î0}:äæ]¡$I’$I’$I’¤Ì@ˆ$I’$I’êÝG|Ä\Á5\ÃT¦]ŽÔð”uY¿n¸~õ+HI `åÊ «“$I’$I’$I’Ô‘$I’$IR½ÚÌfÆ0†4ÒxœÇƒ.GjØ’“K»ƒlØ3fÀÀ€0b,\%%AW(I’$I’$I’$©0"I’$I’¤zó_1’‘´¡ /ð-htIRãЦ LœË—Ãüù¥ëF†ÓO‡Ù³!??Øú$I’$I’$I’$Î@ˆ$I’$I’êÅAr5W³‘,` $]’ÔøÄÄÀ¨Qðúë°lœu¤§CÏžpç°ysÐJ’$I’$I’$I ˆI’$I’$Õ‹[¸…×x, }‚.GjüN?žxÖ®…ÿüOxê)‡aÂøôÓ «“$I’$I’$I’tŒ‘$I’$IR{’'™ÉL~ËoÌà Ë‘š–ÄD˜>6n„Ù³K;‡œr  BIIÐJ’$I’$I’$I: „H’$I’$©N½Ê«Lf2÷s?ßã{A—#5]qq¥ÝAþýoxýuHH€1cओ`Æ Ø·/è %I’$I’$I’$Õ#!’$I’$Iª3ËYÎwù.ßã{ü˜]Žt|…`øðÒî Ÿ}^wÝÉÉž^ÚID’$I’$I’$IR“c D’$I’$Iub+[¹ˆ‹È@žà‰ Ë‘ŽOýú•vY·®4ò·¿AïÞ0n¼÷^ÐÕI’$I’$I’$IªCB$I’$I’tÔòÉç.!–Xæ18â‚.I:¾uî wÜYYðä“°j  C‡ÂóÏCqqÐJ’$I’$I’$I:JB$I’$I’tTrïñ=¾à ^æe:Ò1è’$•iÑ&L€O>%K 9®¼²´“ÈCQ²©È IDATÁ®]AW(I’$I’$I’$é‘$I’$IÒQ¹ƒ;x‰—xžçéG¿ Ë‘T•¡Cá¹çà³Ïàâ‹áþû¡GHO‡õ냮N’$I’$I’$IÒa2"I’$I’¤#öOñ0ó[~Ë0†]ޤÚèÓfÌ€M›à¾û`þ|‡aÔ(xûí «“$I’$I’$I’TKB$I’$I’tD³˜ÉLæ§ü”ñŒºI‡«}ûÒî _|Ï> ¹¹¥]D ‚9s ¨(è %I’$I’$I’$UÃ@ˆ$I’$I’ÛJVr)—r 
—0iA—#éhÄÆÂØ±ðÎ;ðá‡Ð¿?\wtïÓ§ÃÎAW(I’$I’$I’$©B$I’$I’tXv°ƒÑŒ&•TžáB„‚.IR]IK+í²z5L˜¿úôè7ÜŸ}tu’$I’$I’$I’Ê1"I’$I’¤Z+ €ÑŒ¦˜bæ1–´ º$Iõ¡W/xðAX¿~/†ÔT5 22‚®N’$I’$I’$IB$I’$I’TK%”p×±œå,d!étI’ê[Û¶0q"¬\ óçCAŒ§Ÿ³g—Þ—$I’$I’$I’!’$I’$Iª•ócžçyæ1TRƒ.GÒ±SÚäõ×!3N9¦Lž=aútر£úñ™gÂÔ©PRr,*–$I’$I’$I’š<!’$I’$IªÑÓ<Í/ø3™É7ùfÐåH ÒgÀœ9°aLš³fAJ L˜Ë—W>æå—áƒà‘Gàû߇ââc[³$I’$I’$I’Ô‘$I’$IRµÞâ-&1‰»¹›¸!èr$5‰‰¥ÝA6n„Ù³áÃaÀ:.ŒîòË_Bóæ¥BþøG3 +]’$I’$I’$Ij „H’$I’$©JkXÃå\Î(Fq?÷]ޤ†¨eËÒî Ÿ~ ¯¿ ¥“O†3Jƒ"K–@QQéöÅÅðÚkpÁ°gO°µK’$I’$I’$IXó  $I’$IRÔK.r!=éÉ3}¢B"e·îݻӼ¹¿Š—$I’$I’$IRÝó¯P’$I’$IŠRB ×s=™d²”¥t¡KÐ%Ij¬žB¡ª/,„µkᬳ`ñbèÝ»^ÊÈËË#++«ÒÛºuë8xð „ÃaÂá0cÇŽ%Ó¿ À 'œP«ý~üñÇüéObË–-ÄÆÆÒ­[·È~ËßN:é$Ú´iS/s–$I’$I’$IRÓg D’$I’$IQîå^þÂ_x™—9…S‚.GRcµ?ÌšUú¨Na!lÝ ƒÃ?þÑá g,_¾œ+V°fÍvíÚT g >ü¨Â ¤¥¥‘––Vc=e·ŒŒŒ*C(åo©©©$%%Ñs!I’$I’$I’¤ãƒI’$I’$EÌe.÷q³˜ÅF]ޤÆìO‚;k·mQäåÁ¹çÂoÀgTؤ°°ìììJCŸ}ö{÷î ..Ž®]»‡IKKcìØ±ôïߟÔÔTºwïNóæÇæ×âÕ…EöïßϦM›* ‹,_¾œ‚‚‚È>* ‹„ÃazöìILLÌ1™‹$I’$I’$I’&!’$I’$I`)K¹†k¸•[™Ìä Ë‘ÔØíØmÚÀW_U|,6bb ¤¤4 rð`éÿwí¢äÜsÉ|à–µjéò‘••Åúõë)..¢ƒÇgâĉ‘û½zõ" ãÉž¸¸¸H½‡***bÆ •†EV¯^Íž={hÑ¢)))•†Eú÷ïO||ü±ž–$I’$I’$I’Ž1!’$I’$Ib-k¹œËÎpäÁ Ë‘ÔÜ~{é­°rsK";v°wýzrW®dϺulÜHñ¶mÄîÜIÛ={h[XÈ ûö±ú–[¸³\è£|—Þ½{s '=»zÓ¼yó*Ã"yyy•vIÉÈÈ +++²îÝ»WéׯmÛ¶=–S’$I’$I’$IR=1"I’$I’tœÛÉN.äBRHa.siF³ K’ÔÈ•-”ïòñÅ_°{÷nbccéÖ­[iPá߈ -Œ9ùd®jÝ:àY4L ¤¥¥‘––Vá±êÂ"ëÖ­ãàÁƒ‘}T©.ˆ"I’$I’$I’¤†Ç@ˆ$I’$IÒq¬BÆ1Ž=ì!ƒ ZãØ’jVXXHvvv¥áƒÏ>ûŒ½{÷G×®] ‡Ã¤¥¥1~üxRSS ‡ÃôèуfÍ  Õ¥êÂ"`ãÆ•†EV¬XA~~~dUEzöìILḺž–$I’$I’$I’ª` D’$I’$é86…)üÿÖBJÐåHj@ ؼys….YYY¬_¿žââb :@0|øp&Nœ¹ß«W/B¡PÀ3@‹-ªí²yóæ¨×¸,,²zõjöìÙÙGJJJ¥a‘þýû,§$I’$I’$I’tÜ3"I’$I’tœz€xЧ˜Ç|xTèÃ. :ÖZ´h9ÿ*“——WáœÎÈÈàóÏ?çË/¿Œì#%%¥ÒÎ"v¯‘$I’$I’$IÇ;!’$I’$Ilj/ù’ÑŒ¦#ù ¡¹¿’”¼¼¼J»|dee±víZJJJ€ÒÐGY'…áÇG.ŽïÛ·/íÚµ xRí%$$0tèP†ZᱪÞÞáp¸Bwß’$I’$I’$éxà_ý%I’$I’ŽEñ¾Ãvð>ïÓžöA—$—*눕•UmG„ò¡;"èx‘@ZZiii«ªcÎÛo¿]eÇœCovÌ‘$I’$I’$IMI’$I’¤ãÀÍÜÌR–²˜Åt£[ÐåHMÖظqc¥ V¬XA~~>-[¶$Gº|Lœ81r¡z=hÖ¬YÀ3‘®²÷O8fÔ¨QQUõÌÈÈ`åÊ•ìÛ·/²äääJ»‹ø”$I’$I’$I…I’$I’¤&î—ü’'x‚¿ñ7ÎäÌ Ë‘½‚‚Ö¬YS¡ËGVVV•Ý  }„Ãáz«¯|׃’’’z=Æ‘î? 
ÕKmË—/禛nâƒ>૯¾j_ãáÌéhçßÍkøüƒ™3gòæ›oR\\Lß¾}™2e ×\sÍQwíhÑ¢Eµï±¼¼<²²²¢º‹¼ýöÛ<óÌ3ìÞ½€ØØXºuëVig‘“O>™Ö­[U’$I’$I’$IuÅ@ˆ$I’$IRö/qwñÿø\Â%A—#5eWu+“””é,0|øðÈEã}ûö¥]»vÔ^RRRo‹CÑÐ\{íµÜ~ûí¼òÊ+,Y²„#FÔzìáÌ©¡Î¿±øæ7¿ÉÀyíµ×0`Ÿþ97Þx#ÙÙÙüä'?©×c'$$––FZZZ…Ǫzßgdd°víÚÈ{ª|Ø«|w‘>}úо}ûz­_’$I’$I’$©¼PÉñüf’tB„˜Ë\Æ1.èR$Ij²fÏžÍĉƒ.C’½e,ã<Îã»|—ßòÛ Ë‘œÍ›7WÚåcõêÕìÙ³(í6’’Ri§€þýûð,*Wߣ=F}ÕKAAÍš5;¢ñ‡S×±x޲£}ý?ùäY·f͆ ÂÖ­[ëªÄ:µÿ~6mÚT¡»Hu½õêÕË ‘ŽK™™™•±$I’$I’$Iµ …˜;w.ãÆE_¿P=’$I’$IÿŸ½;¯º¼óÿÿ<Ù! I ²!D°¢… *Ô@¥¨U«—NÆeÐZµµþDéØNË|™ÚZ[QÇ ¶–¶¢5ŽX¡.•*H¢lIØa !,!ëï&§9d°ÏÇ}+9÷çsîó¾OΉ—úyå­£h˜ÌdÆ0†Ÿóóp—#…EMMMð¯û?ýôÓ<ðÀ\sÍ5Œ1‚îÝ»“™™Éøñãyà((( ??ŸýèG¼ù曬^½š½{÷²zõjÞ|óMžzê)î¿ÿ~®¾újòòòºl¤-•••Ü}÷ÝäääGzz:7ß|3~øá[XXÈĉIHH ))‰+®¸‚µk×¶yî¼yó=z4qqqdggs÷ÝwÃ5Íš/ˆÁÛ-·ÜÒ©5Ú¨««#**Š@ À<<¶lÙ2&NœHbb"‰‰‰Lœ8‘eË–pÍÎîÙ²e\zé¥Áç¹øâ‹[=OóžW¯^Í•W^IJJJp®#Yû³Ï>cÔ¨QtïÞóÏ?ŸU«VQXXÈÈ‘#IHH`ܸq­öÐüØ¢¢".¾øbzôèABB—^z)Ÿ}öY«s[>¦ùgXZZ2·jÕ*víÚ2WZZJcccH W¯^ìÛ·¯Ã× œbccƒ€îºë.žzê©6O<þøãäççPPPÀ·¿ýmÆOnn.ݺu#77—ñãÇóOÿôOÌœ9“‚‚Š‹‹ƒI’$I’$I’¤Îˆ w’$I’$I:²ª¨âR.¥=˜Íl¢üO@:UTT´êðÑ|+--¥¡¡ý«ýùùùL:5ä/÷Ÿènºé&Î:ë,Þ{ï=’““)**bÚ´iŒ9²Ã.«W¯æ’K.aúôéÌš5‹ˆˆ^ýu®½öÚ6ÏŸ}:/¾ø" ,`âĉíÖÑÙµ§OŸÎ³Ï>KFF>ø S¦L!++‹Y³f‘‘‘Áw¾óî¹ç^zé¥V¯ß×¾ö5|ðA^|ñEêë냯Õ;ï¼Cvvv‡¯5ÀÒ¥Kùæ7¿6jhh`ðàÁ¼üòËÁ5ö÷Øc1zôèv_ƒ®,::ºÃß%-O5wY¼x1sæÌ¡²²2¸Fß¾}Ûì,2dÈâããå–$I’$I’$IÒq"Ðx2÷´—¤C Àæp ×ødI’tHž~úi¦Nî2$é¸TO=Wpïñïó>¹ä†»$é°uú(.. 
**Š~ýúµy1õàÁƒILL ó.Ž­ý/ÖOLLdÍš5ôìÙ38WRRBNNN‡n¸#Fp×]w…ÌÏš5‹o|ã>þö³8p Û¶më°¾CY£-m­;eÊÎ9çœV{˜1cü1/¼ðB»ïÌþ§L™ÂgœÁ}÷Ýrî/ùK–,YÂÌ™3Cžçí·ßfìØ±ÜÓ¡¬½`Á.¼ðBÊÊÊÈÌÌ ™[¿~=yyy”——‡¬˜={6_ûÚ×Bæg̘ÁÒ¥KCB9ý Ï>ûlž}öY†ÀüùóùÑ~Ä[o½rÞÚµkyçwøÉO~BII ï¿ÿ>¹¹'×?·:úýVRR|[†Úrrr:t(Æ #77—äää0ïBêØâÅ‹ÈËË s%’$I’$I’tü Ì™3‡k® ½~Ù@ˆ$u’I’Ž>!’tè¦1_ð þÿc£Â]ŽtPêêêX»vm›D¯X±‚ªª*bbbÈÊÊj3ô1tèPºuëætû_¬?aÂöìÙÃ÷¾÷=òóó‰Š:¸ÎA}úôáƒ> ÿþ!ó›6m"==ý Bm:éÌùm×ÞJJJ8ï¼óظqc»ïÌþÛ;·¼¼œqãÆQTTò<»wï¦{÷îÜÓ¡¬½sçÎ`ª¡¡ÈÈÈVsQQQÁ:-»uëVzõê2_RRÂèÑ£)++ 9·½ŸÉÌ™3Y¾|9?ûÙϸôÒK¹ýöÛ¹ôÒK[=_ÿþýùêW¿Ê¿ü˿ЧOŸƒz=NûöícÆ !¿›;Œ¬]»–ºº: uX¤å-;;›ˆˆˆ0ïD';!’$I’$I’tø „HÒb D’¤£Ï@ˆ$š™ÌänîæE^ä:® w9R‡Þzë-~øÃ²zõjÖ¬Y¼°¹gÏžäææ¶º 8ŒŒŒ0W}üØÿbýÊÊJyä~ûÛß²eËÎ<óL¾úÕ¯rûí·Ýî:QQQìÞ½›ØØØ>Ç–-[øÎw¾Ã믿ΦM›¨¯¯;Ø@HgÖ8˜}w´‡êêj©­­m÷ñÙtttð}¼¿nݺ±gÏžëìÈá®}8spp¯UK[¶lá´ÓNcíÚµlÚ´‰Ë.»ŒÂÂBÀA=Ÿ¬¦¦†ÒÒRV¯^r[µjÅÅÅìÛ·€øøxrss2dO>ùdH— éX1"I’$I’$I‡¯½@ˆJ’$I’$é0ŸùÜÃ=ü;ÿnDR+III̘1ƒÕ«WS\\ÌwÜÁìÙ³¹îºŽ_¤¦¦²iÓ¦VóÛ¶mk5wà 7ÏÂ… ©®®¦±±±Óû‰5ö—ššÒ¤ÙÆIMM=àcvÿ©©©lß¾=XsË[ËÀÆ¡8škﯼ¼¼ÕÜÆéÝ»÷A¯Ñ»woFÍìÙ³yòÉ'¹óÎ;[…A$I’$I’$I’tø¢Â]€$I’$I’Ï'|Â5\ÃÜÈýÜîr¤ƒrÑEqÑEPWWÇÚµk)..¹½þúë¬X±‚ªª*bbbÈÊÊ"''§ÕmèСtëÖ-œ[êÒëÖ­#++‹ÔÔT¾öµ¯1~üx²³³;|Ü„ xõÕW¹ë®»Bæßzë­VçþùÏföìÙ$''çÚ +´èÌ+??Ÿ_ÿú×|ûÛß™ÿÍo~C~~~‡íÌþ¿üå/³`Á®¸âŠùwß}—»îº‹¿þõ¯‡¸ƒ£»öþÞxã n¼ñƹßüæ7L˜0!dî@›nº‰û·£ªªŠO>ù¤ÍsìÒ±}ûö±aÆàïÄWº¿ÂÊÀJ*7T²cë* âëâIKK#ëœ,ÎøÊ\Þërúföepú`úgö'1"‘b膿#%I’$I’$I:Ñ‘$I’$I:Žmd#_á+œÍÙüœŸ‡»éDEEƒm©¨¨h)..¦  €âââàýúõk3,2xð`å–º¤[n¹…ÿøÇ 8;v0sæÌ"¦OŸÎرcILLdÒ¤IDFFRPPÀ÷¿ÿýVçžwÞyÜwß}|÷»ß%##ƒµk×òØcµ¹nff&ï½÷#FŒàí·ßæ›ßü&ëÖ­ëÔëá‡fܸqôèу¯|å+æÍ›ÇŒ3X°`ÁÛÿôéÓùÊW¾B}}=ãÆ#&&†wÞy‡[o½•Ÿþô§‡µ‡£¹öþžzê)¢¢¢øò—¿LCCóæÍcæÌ™¼óÎ;!çµ÷3lvÙe—ñÿøÜzë­tïÞ½ÕóŒ=š@ ÀÂ… hýÇ›ö~¿SRR ͤ¤¤óD ÕçWˆ—L]l  ì 좸iŒ8âèF7bˆ!žx¢ˆ"‘D"ˆ ‰¤¿=)$‘D$’HQ$@4Ñt§;±Ä¶Z+žxâˆ#‰$ºÑ8â‚kI’$I’$I’¤#Ï@ˆ$I’$IÒqj{¸œË‰'žWy•bÂ]’tT¤¤¤——G^^^«c…EJKKihh®ÑVX¤£ Êñª¹sC ^L^PPÀO<Á…^HUU™™™\~ùå<÷Üs®•““Ãüùóù—ùî¼óNçw¿úÕ¯6lXÈs<ÿüóÜ{l9’;v0xð`zè!žþùóf̘Á׿þu6lØ@¿~ý‚¡†Î¬ÑѾáïÝ' Äüùó¹ï¾ûøÖ·¾ÀùçŸÏüùó8p`‡¯[göŸÍ+¯¼Âw¾ón¹åêêê:t(?ùÉO˜>þ€Ï³—½TSòýá~­¤ò€çTQEu¬¯9Ò2(ÒÖ÷:Þòû^ô"–ØÃûI’$I’$I’t 4Ú“]’:%@€9Ìá® w)’$°ž~úi¦Nî2$©Kk +¹’…,ä}Þg ü é$SSSÃúõëÛ Œ±wï^ ã°HvvöIsѺt 
ÐMgÌ;—ßÿþ÷<ÿüóGd½®¬¶¶–uëÖµù»æóÏ?g÷îÝÄÆÆ’™™üý2tèP† FNNýû÷'222Ì;9tÍ‘]좚jv²“Ýì†Jö°‡}죂Šà¹;ØA5Õìa•T²}ìbUTƒ&»ØE-µ>wsˆ$™d∣;ÝI"‰8âˆ'žD‰#ŽDI 8âèA’IÎ%’HzD‰$²=Â/^ Ðf°S’$I’$I’tpsæÌáškB¯_¶Cˆ$I’$IÒqèîá Þà-Þ2 "µ#&&¦Ã eeeÁ¿Ôß²³ÈŠ+¨ªª ®‘••ÕfXdèСtëÖíXnIêò|ð?øÁxæ™gÂ]ÎS]]MYYY«.ÅÅŬY³†úúz 4`–ŸŸÏÔ©Sƒ÷ ìÎr¢‰k)¤ñµë©g';Ùê©f;‚ÝIö˜ìc»ÙÍNvRM5»ØEeÁ€Énv³—½ìd'•TÒ@C›ÏK, $Ô4šC#‰$’D=è&I&™M#‰$’I&‰$â9pgI’$I’$I’‡I’$I’¤ãÌ3<ÃLfò<Ïsç…»鸕‘‘AFFF›Ç***ÚükÿÏKOOþ…ÿ–·AƒÑ£Gcµé°5ŽD—1cÆpÇwpÖYg‰ÒŽ™ö>÷ÅÅÅ”””_—–¡n¸!ø;`àÀ$%%…y'žH"IiGZs°d/{©hûßß®Œ2 ) ™ÛÊÖv;™4eÚÝèÖáñTRíV"I’$I’$IêI’$I’¤ãȼÁmÜÆ£<ʦ„»é„•’’B^^yyy­ŽUWW³zõê6»‹´×)`ÿÛ‰Ü)@ǧà éuŽ–æÐÇþ]>V­ZEee%ÑÑÑôíÛ7¤ÓGó÷C† !>Þ®'ŠnM#…2h; x°ª¨¢²iì`GðûæûÍ£’J¶²•U¬ 9¾›Ým®›@BHH¤'=úûþsF’$I’$I’NtB$I’$I’ŽEq-×rWñ †»é¤ǰaÃ6lX«c555¬_¿¾ÍÎ"EEEìÝ»7¸FFFC‡mÕa¤ÿþDFFëmI'„ö>ƒÅÅÅ|öÙgìÙ³øûg0''‡Ñ£G3uêT?ƒ:,‰M#‹¬Cz|uÁ€H!¡’–ÝJ¶³2ÊXƲ¹Z­™Lr0ÒV`¤W‹‘Jjðû"÷å$I’$I’$#B$I’$I’Ž›ØÄD&ò¾À,fùŸ¥.*&&&xQy[***Zu&(((`åÊ•ìܹ3¸FVVV›EN;í4ºwï~,·$u9ÕÕÕ”••µú,·Û¥'???$ôa—u5QD‡¢98²í!A‘ýçV²2øý6¶QEU«µöˆ4ßomÄ•$I’$I’$…ƒI’$I’¤.n/{¹‚+ˆ$’Wx…XbÃ]’¤C”’’˜1c3fL«cmv5((( ¤¤„ÆÆÆà999­º‹ 4ˆ=zë-IGE{Ÿ‡âââv?ùùù~tÒJjÙdwêqûØÇ¶c+[Ù–ûå”SD[ØÂV¶¶"éIÏHoz“F§p »zî"¥.…(¢èÝ4¢‰>B;—$I’$I’¤“›I’$I’¤.¬‘Fþ`+x÷èMïp—$é(III!//¼¼¼VÇÚ눰hÑ¢v;"ì³#‚ºš¶:æwØ1§eèÃŽ9Òá‹%–Œ¦q°j¨ †Eš$IÙ— IDATƒ#Ía‘æûÙÈÇ|̶°9{3µÚ5zÒ“S8…Þô&•TÒI~ŸÖ4RI H$I’$I’$Im3"I’$I’Ô…=À¼Â+¼Îë fp¸Ë‘&qqqÁ‹à'Mšr¬¦¦†õë×·ÙYä³Ï>cÏž=Á5222Úì.Ò¿"##ñ5ÀÚ{o·ùÞlîò1uêTß›RC éMã`,þëbª"«H?3=ÙÄ&6³™­le3›)¤0xl [h¤1øø(¢‚á4ÒH'4ÒÈ ƒS8…L2ƒóÉ$­mK’$I’$IR—d D’$I’$©‹ú%¿ä?øf1‹/ñ¥p—#©‹Š‰‰ ^<ß–ŠŠ Š‹‹C:1,Z´ˆçž{ŽÊÊJ¢££éÛ·o›E† B||ü±Ü’Ž#ÕÕÕ¬^½ºU—âââv»×ìú°{tâK¬OäÔ¦q õÔÃ!›ÙL9åli›šÆg|ÆF6²™ÍÔò÷î#qÄч>!a‘S8…tÒéÓ4šE}4·,I’$I’$IÇ„I’$I’¤.èÞá6nã{|¹1ÜåH:Ž¥¤¤——G^^^«cÍa‘¶º‹”””ÐØØ\£ùâý–ÝEHRRÒ±Þ’Ž±öÞ'Í·f)))Á÷G~~~ð=3hÐ zôèÆH:žD oH#lne”QN9›F9å|ʧlf3Ø@U!=¥id ŠdA_ú’N:}éKiDb—"I’$I’$I]—I’$I’¤.æs>ç ®`2“y„GÂ]ޤXGa‘êêjÊÊÊZuyá…Úíü°ÿÍÎDz²²6»|¬X±‚ªª¿]DCVVVH§–A¡nݺ…y’N6¤5/ð…ÏÝÃ6²1Øe¤Œ²`Xd3›YÂ’`בfÍá”,²È ƒ,²È$³Up¤;ÝöV%I’$I’$©MB$I’$I’º­le“È@f1‹^H-)<âââB.üo©¶¶–uëÖµÙYäóÏ?g÷îÝÄÆÆ’™™Ùfw‘þýûé_]?VjjjX¿~}›>ŠŠŠØ»w/ðÉÏÏgêÔ©ÁûÙÙÙDDD„y'’thºÓܦё}죌26°õ¬§Œ2Ö±Ž2Êø+eóØÈFj¨ 
>&™d2É$‹,ÒI§ýH'L2éÛ4zÑëhoQ’$I’$IÒIÈ@ˆ$I’$IRQM5“™L=õüžßû—†%uYÑÑÑÁ@[***‚aƒæî"‹/fΜ9TVV×èÛ·o›E† B||ü±ÜÒ ¡åë¾ÿ­´´”†† ãÐG{?SI:YÄË€¦Ñ‘ *(£Œl ~-¦˜2Êø”O)£ŒMl¢‘Æàº™d’C餓A9-F_úMô±Ø¢$I’$I’¤ˆI’$I’¤. ‘Fná–±ŒE,âN wI’tÈRRRÈËË#//«¯¾:äX{¡…‚‚JJJhll ®Ñ2¤ÐÜ]$77—äääpl«Kè(ôQ\\ @TTýúõ }4?xð`ü I:þ¥4a k÷œ=ìa-kYÇ:Ö6RJYÇ:þÌŸYǺ`§‘H"I'þM£_ÓèK_²É¦ýèAcµ=I’$I’$IÇ !’$I’$I]ÀC<Ä\æò¯q:§‡»I:jZ†Eö·oß>6lØÐª»ÈK/½Äš5k¨¯¯®ÑVg‘œœ @ 8ÖÛ:bêêêX»vm›a+VPUU@LL YYYm†>†J·nݼIRwº3¤i´¥‘F6²1iy×YËZ¶³=x~2Éô£_04’M6@9 `I$«­I’$I’$Iê" „H’$I’$…Ùlfó}¾ÏÏøù䇻I ›ØØØ`¨aµµµ¬[·®ÍÎ"Ÿþ9»wﮑ™™Ùfw‘~ýúþÿ,¾ð¥å­°°êêj 4ø’ŸŸÏÔ©Sƒ÷³³³‰ˆˆóN$I‡#@€Œ¦1ŠQmž³‹]¬aM«ÐÈ–ð*¯² 4ò·îZ½èiùµýˆ&úXnO’$I’$IÒ1þÿó%I’$I’t{—w¹™›¹Ÿû¹•[Ã]Ž$uYÑÑÑí†E***Z+/^ÌܹsÙ±cGp¾}û¶ÙYäÔSO%!!áˆÕ»=-o¥¥¥444í‡>† Fzzú«G’t|J aM£-5Ô°žõï7 (`5«ÙÁŽà¹)¤ÓÎÀ¿¶$I’$I’¤“•I’$I’¤0YÍj¾ÊW¹ŒËø7þ-ÜåHÒq-%%…¼¼<òòòZk/œQPPÐn8£ew‘ÓO?äääƒZ·°°ÂÂÂvC(ùùùG-„"I:ùÄ u´eÛ(¡„bŠC¾¾Ìˬe-µÔO|02h¿‘E–aI’$I’$©‹2"I’$I’ÛÙÎD&Ò~<ÇsDî’$é„ÕQXd÷îݬ^½ºÕmîܹ¬[·Žºº:zõêEnn.½{÷¦¤¤„ââbª««èÞ½;¹¹¹äææòÅ/~‘)S¦ï÷ëר(ÿS¼$)b1‹yŽçx„GèK_F0"$(Ò›ÞáÚ‚$I’$I’‘$I’$I:d3™É.vq—ñ_üwpñ7r#ßä›LcZ¸K”$I’$¤žôd|ÓhVI%Ÿò)‹›Æ¯øñP›DF2’S8%Œ;$I’$IÒÉÆ@ˆ$I’$IÒ!(¥”wx‡Æ¦1i,bÿÇÿqð3~î%I’$I‡)‰$Æ4f[Ùì"²˜Å<˳ÁN"Ùd"#Á¹œKIá*_’$I’$I'8!’$I’$I‡à—ü’(¢¨¥€F™Ë\ÒHãü‚(ÿ³‹$I’$RIåâ¦Ñl;XƲ`HäøäAi$‡F3š<òÃÎâ,"ˆã$I’$I’t¢ðÊI’$I’¤Nj gx&iVO=›ÙÌE\ļA?ú…©BI’$IÒ±”Lr«N"å”ó>ïógþÌ{¼Ç˼Ì^öÒ“ž|‘/2ŠQŒf4çp $„±zI’$I’$¯ „H’$I’$uÒ¼ÁF6¶y¬Ž:V±Š<ò˜Ï|F0âW'I’$Iê ÒHcrÓ€¿ýûâr–³ˆE,d!ÿËÿòI$§rj°ƒÈhF3”¡„y’$I’$Iêê „H’$I’$uÒÓÌÕK’$I’$éX3"I’$I’Ô OñõÔ‡ÌEA œÇy<ÁœÎéaªN’$I’t¢Én×s=ð·Î•ïð YÈù#ó8Dp&g’O>£Í\à)$I’$I: ‘$I’$I:HæÏ¬deÈ\$‘d’ÉøWsu˜*“$I’$,Ná®nå”ó'þÄBR@?ä‡DÉp†"r!=èæÊ%I’$I’t¤‘$I’$I:HÏð ÑDSK-ÑDI$÷s?ðqÄ…»þà/V©©©aÍš5}>À¶õÛÈ]–Kò;É5QD{÷º:x^óëØ¬ªªŠºººN=W[Zî±ùul“tt,!!ØØX’’’‚a•¤¤$bbbHLL$>>žØØX’““ƒá–ξþ’$I’¤ãG_úrKÓ¨§žOø„ ø¿ãçüœ"ÉH&1‰|ò9›³ 8ðÂ’$I’$I:æ „H’$I’t”ìØ±ƒªªª[[sìÚµ+dn×®]ìÛ·ÊÊJª««Ù»wïŸ/11‘˜˜˜VþGDD/ðOMM%..ŽÈÈHzôè<§[·nÄÅÅEbbbȱæ.-5?gTÔßÿó‚A‚Û¿›IË.+Íáž–ÝUš%Íǚ×j>Ö² KYYYð~sP¨9¼²g—¶4¿Gšö)))ÄÅÅ‘@=HNN&111äÖÖ\JJJHI’$IR×I$yMã~îg [XÀ (`&3y€H# L`“Ïx’IwÙ’$I’$Ijb D’$I’¤vìÞ½›íÛ·SQQAEEEðû¶æ***عsg0ÌÑ|Q~[IHH¹`¾ùk¿~ý‚ÇšÃÍAý»:ÄÄÄ’’Òéî ¯æ 
N³–]SŽ¥ÚÚZvíÚÅ®]»¨©©i·ÛLËIuu5UUUìܹ“ÒÒÒ ÓÎ;; šÄÅŵzÏ÷ìÙ“””RRRÚü¾åœ$I’$éèëMo®nOò$á/¼Þ4®ã:"ˆà<Îãâ¦q&g†»dI’$I’¤“šI’$IÒI¡¢¢‚Í›7³e˶nÝÊæÍ›ƒ÷Û yÔÔÔ´Z§ùbö–¬÷ë×áÇӣG6;#ì?'uÑÑÑG%lÑiFµ×)gçÎlß¾õë×óé§Ÿ†®ö•vƒ#©©©ôîÝ›ÔÔTúôér¿e÷I’$IRçDÁȦñ0³müÿGü”Ÿò¾Ã)œÂ—ù2Ws5˜@,±á.[’$I’$é¤âÿ—$I’$—öîÝËúõë)//oðhë~mmmÈã“““IKK#55•ž={Ò«W/ tÀn^`.u,!!„„„ÃZcÇŽÁpH{y¶oßÎgŸ}ÆÖ­[Ùºu+[¶l¡¾¾>dÔÔÔ#}úô¡wïÞ¤§§Ó«W¯ÃªY’$I’Nt½èìÒH#ó1¿å·Ìc/ðI$q1s9—s1“Lr¸K–$I’$I:áy‹$I’$©K©©©aëÖ­lܸ‘²²²v¿nÚ´)¤‹@\\)))dddžžNzz:C‡ Þowddd••ELLLw)©#ÉÉÉ$''3`À€N=nïÞ½ÁßÍÁ‘–÷×­[LJ~HYYY« Xll,={ö þÎèè«$I’$ì8»i<Â#¬aM0r7 À…\Èå\Ε\I:éá.Y’$I’$é„d D’$I’tÌÔ×׳aÃÖ¬YCii)kÖ¬aÍš5”••±nÝ:ÊËËÙ¼ysÈcÒÒÒèÓ§YYYôéÓ‡¼¼¼àEÙÍh§¦¦ðD·nÝÈÉÉ!''ç€ç666» 5‡Í6lØÀÆY¿~=Ÿ~ú)óçϧ¼¼œšššàãâããéÛ·/iiideeѯ_?ú÷ï¼eggÓ­[·£¹MI’$IêrúÓŸ;›F¼Ækü–ßòp'w2šÑ\ÅU\É•d‘îr%I’$I’NB$I’$IGLMM ëÖ­ =ö~¬_¿>øùcbb‚Rgff2|øpÒÓÓƒÁ¬¬,ÒÒÒ zH:*½{÷¦wïÞ 6¬ÃsËËËÙ´iëׯgÓ¦MÁàȆ øýïÏš5kعsgðü´´´HsP$;;›þýû“˜˜x´·'I’$Ia“B _oÕTó&oò/ñÿñÿqw1”¡\ÍÕLa îr%I’$I’ŽkB$I’$IR__Ïš5kX¹r%Ë—/gùòå¬\¹’+V°nÝ:€¿ý¥þЯ_?N=õT&L˜¼0zÀ€ôéÓ‡ˆˆˆ0ïF’,--´´4†Þî9m†áÞ~ûmÖ¬YÃÖ­[ƒçöêÕ‹Aƒqê©§2xðààmРAv‘$I’tB‰#ŽIMcûø#ä%^b&3y”G9—s¹–kù_#ôp—+I’$I’tÜ1"I’$IjÓÖ­[ƒ+V «V­bß¾}ôîÝ;xAóE]Dnnn0ôqÊ)§„y’t줤¤’’™gžÙæñÝ»wSZZ¼­X±‚åË—óî»ï²fÍêëë ôë×/$$Òü;¶ÿþ†è$I’$×b‰ †Cj¨¡€æ2—‡y˜{¹—±Œåz®çJ®$™äp—+I’$I’t\0"I’$I'¹æŽ………,^¼˜Å‹STTDqq1111dee1tèP.»ì2rrrÈÉÉáôÓO§OŸ>a®^’Žñññ 6ŒaƵ:V[[˺uë(..¦°°¢¢"–/_Îüùó)))¡±±‘˜˜H^^^ðvæ™g’†ÝH’$IÒá‰!†‰Mã)ž v¹“;¹ÛÏx®æj®äJðß{$I’$I’Úc D’$I’N"•••|òÉ',Y²„¥K—òÉ'ŸPXXHuu5ÑÑÑœvÚi >œÛn»áÇ3dȲ²²á.]’NXÑÑÑÁ°]~~~È±ŠŠ V¬XÁÒ¥KYºt)K–,aÞ¼yTVVANNgžy&gœqÇçÌ3Ϥ_¿~aÚ‰$I’$u^ËÎ!?á'ü†ßð¿ü/ÿÀ?0i\ÅUÜÌÍŒa üoT’$I’$I-‘$I’¤XYY‹-báÂ…,Z´ˆ?þ˜††’““6l£GfêÔ© :”¼¼<ºuëî’%I-¤¤¤0räHFŽ2_VVìèTXXÈìÙ³™>}: ôéÓ‡#F0fÌF͹çžKLLL˜v I’$I/‰$nnå”3‡9<Çs\Àä’ËÜÈMÜDú‡»TI’$I’¤.Á@ˆ$I’$ êëëY²d ‹- †@6lØ@LL yyyŒ7އzˆ#F™™îr%I‡!##ƒŒŒ &Mšœ«ªªâ“O> þsà‡?ü!Û·o'!!‘#G"£F"111ŒÕK’$IÒ¥‘ÆM£ˆ"žçy~ÆÏx„Gø"_äFnäz®'„p—*I’$I’6B$I’$é8V^^Îo¼Áïÿ{ ¨¨¨ 11‘‘#GrË-·/þµó‡$ø9ÿüó9ÿüóƒsÅÅÅÁ.Q/½ô>ú(œyæ™äççsÙe—qÞyçÆÊ%I’$©cCÊã<Îc<Æ|æ3‹YLc÷r/×s=S™ÊÙœî2%I’$I’Ž9!’$I’tœùüóÏyùå—yùå—Y²d Ý»wgìØ±<òÈ#Œ7Ž¡C‡za¯Ô ãÇçÍ7ß 
wÒQ‘““CNN7Þx#7näÝwßå7Þà…^à?øiii\qÅ\uÕUŒ;–ÈÈÈ0W-I’$Im‹&š¯4­låøžæižâ)Îá¦2•k¹Ö®!’$I’$é¤áB’$I’tغu+3fÌà _ø§v?ýéO=z4o¼ñÛ¶mãøÓ¦MãôÓOï2a@ Ðê–’’¨Q£˜;wn¸Ë «æ×ãdxîpîuuuuL:µÕ|CCC«¹ÂÂB¾ô¥/‘˜˜ØáZ¾¿WÚÛ±xþñó0uêTêêêŽÊÚ‡#==k®¹†_üâ¬_¿žO>ù„iÓ¦ñþûŸOŸ>}¸óÎ;Yºti¸K•$I’¤¥’Ê·øEñqgqw‘AÿÄ?ñWþî%I’$I’Žº®q•$I’$©MK–,áºë®#++‹Gy„Ñ£G³`ÁÖ¯_ÏOúS&L˜@\\\¸ËlScccðkcc#µµµ,[¶ŒÛo¿iÓ¦ñ“Ÿü$̆Oóks2?Ñ«=¨Ÿ‡óÏ?Ÿxਭ$†΃>ÈÇÌÊ•+¹ûî»yýõ×>|x0À×VhJ’$I’º’<òxЧXËZ¦3?ñ'òÈã‹|‘ÿå©¥6Ü%J’$I’$B$I’$© *,,dòäÉœuÖY|þùç<ùä“”••ñóŸÿœ /¼ÈÈÈp—ØiQQQdffrã7òâ‹/òŸÿùŸá.I'‘’’/^Ì”)SêüO>ù„+¯¼’ØØXòóóOÈÀ‡Ž®n¸¿üå/¬]»6Ü¥´òÝï~—åË—óöÛo“Íu×]ǰaØ3gN¸Ë“$I’¤êE/¾Í·)¢ˆ·y›L2¹‘É&›ÇxŒÍlw‰’$I’$IG”I’$IêBöíÛǃ>ÈYgÅúõëùíoË_ÿúW¾ñoîòŽ˜sÏ=—²²²VóË–-ãÒK/%11‘ÄÄD.¾øb–-[rN  ðÙgŸ1jÔ(ºwïÎùçŸÏªU«(,,däÈ‘$$$0nܸ6/Ä^¶l'N >ÇĉCž£yý@ À‚ ˜={vÈ|gê팃]oÞ¼yŒ=š¸¸8²³³¹ûªªju^aa!'N$!!¤¤$®¸âŠv/NïÌk¿zõj®¼òJRRRZ½&2pàÀàcƌөJKKC~«V­b×®]!s¥¥¥m>ïo~ón¸á†VómÕ¨««#**Š@ pX],XÐæ{g×®]$&&†|šŸ¯å{îç]YYÉÝwßMNNqqq¤§§sóÍ7óá‡vX_sM-k¼å–[BÎY·n“'O&11‘´´4¦L™Â¶mÛZ­åç¡õçaÊ”)¼ú꫇ü„K `ìØ±Ìž=›¢¢Aé¬ IDAT"Î9箿þz¾üå/SRRîò¤.!P__Ï¿þë¿’Mll,ƒæ‰'žhuîï~÷;FE||<ñññŒ5Š?üáa¨Z’$éä ÀXÆò2/³–µü#ÿÈñ_ô¥/×p ïñ^¸K”$I’$I:" „H’$IR±cÇ.¹äžxâ f̘Á_þò&MšÔ©‹í~ø!ÙÙÙ!s«V­büøñLš4‰ââbJKK™2e “'OfýúõÁóš;5LŸ>gŸ}–72|øp¦L™ÂÃ?̬Y³Ø°a§v÷ÜsO«ç¸ä’K¸êª«X½zuðBî‹/¾˜U«VPWWGnn.Û·ogìØ±\{íµ”••ѳgOvíÚÕ©zVgÖ›ù¤ÕûªÙ[o½Åˆ#ZÍ·Õù£y®±±‘ÆÆFüñNí±¥±cÇòÑGÑ£Gêêê‚ó¯¼ò IIIÌ;78WWWÇ!Cøè£‚ï=8¸Ÿ÷M7ÝDrr2ï½÷•••¼öÚk¬ZµŠ‘#GvXßþ{mllä™gž 9ç–[ná¶ÛncãÆ|ôÑGÔÔÔpï½÷†œãç¡íÏÈ#xóÍ7;½ÿ®äÔSOåùçŸçÝwߥ¬¬Œ‘#Gòþû,©K¸ýöÛÙ»w/lß¾ÿþïÿæ?ÿó?C:ê¼ÿþûÜ|óÍÜyç”––RRRÂ?ÿó?»I’$éèK'éLg-kù)?e9Ë9ó8Ÿóù¿£»‚J’$I’¤ãW ±­+$Ií `s¸†kÂ]Š$I'¬§Ÿ~š©S§†»Œcª¡¡üü|V­ZÅk¯½Æé§Ÿî’Žˆ@ ¼hº¾¾žòòrÞzë-î»ï>~ðƒpã7Ï2e gœq÷Ýw_È¿üå/Y²d 3gÎ YwÁ‚\xá…”••‘™™2·~ýzòòò(//yŽsÎ9‡»îº+ä9f̘ÁÇÌ /¼Àw¿û]233¹ãŽ;‚ç<þøã¬Y³†'Ÿ|²Óõèµ9Üõ***8p`H׆n¸#F´Úë¬Y³øÆ7¾qÈÏxûí·C Ò¼×¢¢".½ôRæÌ™Ã¹çžrNgj8ûì³yöÙg>|8óçÏçG?úo½õV»5 4ˆ÷Þ{ÔÔÔvë;ÐÜö×žÆÆFÒÓÓyõÕW5jùùù<ðÀ<üðÃ,Z´ø[a̘1lܸ±Ã X[?ïÄÄDÖ¬YCÏž=ƒs%%%äääpÕxõÕW™ N]qÅŒ?žÛo¿=ä¼'žx‚·ß~›W^yåXU.Ia³xñbòòòÂ\‰$ýÝðc~Ìø§q÷r/_çëÄîÒ$I’$I’Ú˜3g×\zý²Iê$!’$}'c ä¹çžcêÔ©|øá‡Á‹ÜOm]ÔžžžÎóÏ?O~~~È|Ÿ>}øàƒèß¿È|yy9ãÆ£¨¨(dÝ;w’˜˜ü-PÙj.**І††>GII 
çw7n`åÊ•\wÝu|ôÑGÁsÌìÙ³9ûì³;]o{¯MË%?VëmÚ´‰ôôôC~î@ ÀîÝ»éÞ½{‡õì_[qq1ùùù<óÌ3Œ7®Õ9©aæÌ™,_¾œŸýìg\zé¥Ü~ûí\zé¥íÖгgOÊËË‰ŽŽn³¾£¸ùæ›4h>ø 6làòË/ç/ù ]tÏ<ó  `ÆŒ,]º”Y³fuú9'L˜Àž={øÞ÷¾G~~>QQQUûêlß¾”””à\}}=ÑÑÑõÙ:Ù?555ddd°uëÖë=žÔÖÖ2räH²²²˜7o^¸Ë‘޽Õ«W“““r¸¢¢"Øi  ÿþ,\¸¾}û†œ·víZ.¸àJKKQá’>B$ue+YÉ<ÁÓ}Ú¼X²9øÑæÜþ¹oݺ•>}ú´Z+===ä‚íAƒѽ{w–.] ÀŸþô'zôè ƒJ½r°ëmÙ²…[n¹…¬¬,¢¢¢‚çí¯½½¶5×Ù½t& Òìâ‹/f×®]ìÞ½»Íã©áúë¯gîܹìÙ³‡ââbJJJ˜8qb‡Ï_UUÕ©Ä‘vÉ%—;˜üêW¿âúë¯àÚk¯eöìÙüîw¿kµƒýy¿ôÒKœ{î¹ÜqÇôìÙ“ .¸€™3gR[[{ص· ƒDFF¶úlùyh[TT•••·éãDtt4÷Üso½õÖyIdzìììVs)))TTTï———sÊ)§´:/--M›6Íò$I’t1ˆ™Ìd+ø:_g3ÀäA¶±íÀ H’$I’$…™I’$Iê"N†ŽÉÉÉÜ|óÍÜ|óÍ<üðÃ!ÇRSSÿöî<,Êzÿÿøk@6YG@vÁ—DMŲ„¾jáR¹𥶛©©uÒ¬NiÇNºÌìduŽ•••iÙbYzŠ2 ­ìà’J悲ãÊâÂ*üþè7sÜnçãºæš™{îÏ}¿?7~^óÖ±cÇì$ÖÛ©S§.Èùl]@ªÊÉÉQ@@€Ý¶;ï¼So¾ù¦$éÍ7ßÔ½÷Þ{Që­ëñÆ'OOO%%%©¸¸Ø¶£ã9ZhzôhõÅ —âÚÏ™3G_~ù¥î¹çýüóÏçUC`` âââ´lÙ2½öÚkš:uªÃ@UÞÞÞ*//¿ s9ýû÷צM›TTT¤>ø@cÆŒ‘$1BŸ|ò‰òòòôÓO?©ÿþvãêúõöõõÕüùóµoß>¥¦¦jòäÉZ¶l™í<ߎ•——Ë××·^cƒ¦ðû ¨ k µ6AAA:tèPµí‡rJ€1"¡ô‚Ò”¦š¡EZ¤Öj­ÇôÁР€`À€Z³f¶nÝjt)—ĤI“´råJ;v̶mÀ€úþûï«íûÃ?Øuæ8 úä“OªmÿôÓO•`·mäÈ‘Z¹r¥>¬5kÖØ::\¬zëz¼7ê©§žRTT”­ã…£Eêýû÷×gŸ}Vm»µKŹœû|Œ3F=zôÐ{ï½§?ýéOÚ³gÏyÕpûí·ë•W^ÑÊ•+5~üø³žßßßßÐN f³Y]»vÕ+¯¼¢   Û"à-Z(88XóçÏW=äççg7®®_o“ɤÌÌLIF­/¿üR_ýõYk;[˜¦.ø~p¬  @þþþõÓЕ––jÞ¼yºá†äââbt9@ƒ×½{w}ñÅÕ¶þùçìw,.³ÌzLé€h®æê-½¥(Eiš¦é ]@5ÍŒ.ðǧÜ/Y²Dƒ Ò—_~©+¯¼Òè’.ª€€ wÕºëÚUÂÚñºvv¶n¹åíÝ»WÁÁÁzï½÷Îéz:99ÉÇÇGS¦L©S7Üpƒ’““«m?Û{çló<Ûû¼ªo¼Q&“IÆ ³Û>tèP¹¸¸è¦›nª6¦®_ïÄÄDyxx¨wïÞòññѵ×^«ââb½óÎ;g­kþüùºí¶Ûäåå¥É“'ÛB5Íí|Þ¿Ž\Îß›7o¶-oì’’’tõÕWë—_~Qbb¢¢¢¢Œ. 
hzõê¥Å‹kÁ‚ŠŒŒTdd¤,X ·ÞzKW_}µÑåà,¼ä¥™š©T¥ê¯ú«ÞÐj£6zNÏ©HEF— Seee¥ÑE@cb’I˵\£4ÊèR¸l-Z´Èáü¦ ´´TO?ý´ž{î9uìØQ³gÏÖСCë¼øh*>üðC­ZµJK–,©Óþû÷ï×wÞY­ p1õéÓGK–,Q«V­Œ.圥¤¤è™gžÑÒ¥K5pà@½öÚkŠŒŒ4º,À8~(-ñgu¨3k0;66ÖàJàüÔI-ÔB=£gä-o=©'u—îR353º4p™3™LZ¾|¹F²_¿L‡0ÀÉ“'•••¥;wjãÆúꫯ´téR½ùæ›òððиq㔟Ÿ¯áÇëÊ+¯Ô믿®ãÇ]6`8“ɤM›6é¹çžÓƒ>Xçq­[·V÷îÝmI€‹í½÷ÞS=e¤¢¢Bß|óFŒ¡Î;kË–-Z¾|¹¾úê+ š4Oyê=¢}Ú§±«©šªÎê¬ô‘*Eh\z|LœƒÂÂBåçç«  @ùùùµ>ÎÏÏW^^žÝ¶òòòjÇtqq‘¯¯¯üüüäçç§èèhuèÐAš:uªzè!5J·Þz«úôé#gggf/>>^“'OV·nÝê5îùçŸ×ý÷߯±cÇ^¤Ê€ÿY¿~½^}õU£Ë¨—]»véÃ?ÔÛo¿­ýû÷«W¯^úðÃ5|øp99ñ¹2` ýCÿÐݺ[ëqÖhÅ+^ÿÔ?ÕU].4!B4Yååå:zôhµÛ‘#GtäÈ=z´Z¸Ã쨨¨¨v<777[˜£j°Ãb±Ø;zÝúØÓÓ³ÆZ,X ÷Þ{Oo½õ–/^¬ÀÀ@ >\Æ SŸ>}äááq1/Ð`TVžû§m6kÖL¯¿þú¬¨Ù¢E‹Œ.á¬*++µeË}ñÅZ±b…vìØ¡   3F÷Þ{¯:vìht‰Ð µU[}¨µI›4MÓÔC=t¯îÕÓzZþò7º<ÐpY(**² u>|ØaØ£êë………ÕŽããã#Êßß_~~~ ²…7Ìf³Ã0‡ŸŸŸÜÝÝ/ÚüZ´h¡©S§jêÔ©Ú»w¯V¬X¡+Vèõ×_—»»»z÷î­êúë¯WLL Ÿäp(;;[?üðƒÖ¬Y£5kÖ(77Waaa>|¸.\¨øøx:P@=]­«µQµB+ôýE˵\³5[“5YÍX–."þò Á)))Ñþýû«…:Ž;VcàãÔ©SvÇ0™Lò÷÷·»©cÇŽv3o...ͺ£5kÖ,Íš5K‡Ö÷߯/¾øBO=õ”¦OŸ.///]sÍ5Š‹‹Sll¬þïÿþO¾¾¾F— 0@jjª’’’´aÃ%%%)%%EÎÎÎêÚµ«î»ï> >^=zôP«V­Œ.påççkëÖ­Ú°aƒ6nܨ7*??_ÞÞÞ¶ `\\œ®¹æyyy].Ð8}ø¡4z´ÄŸÕ Î’““%I±±±W—Öïú]Ó4M_ëkݧûôýC¾âC[À¹1™LZ¾|¹F²_¿Ü´VG¸  •““£C‡éàÁƒÊÍÍÕ¡C‡”››«ƒêСC¶×‹ŠŠìƶlÙR RHHˆ®ºê*+88XÉÉÉ7nœ­‹‡···A3l\¬ŸøÞµkWM™2E’”­äädÛ'Ã/\¸PeeeòõõU§N£Ž;*66VÝ»wWóæÍ ž .¬?ßSRR´sçN%''ë·ß~See¥BBB«Y³f)..N={öl°àrÒ^íµFkô‘>ÒMÑgúL/ëeýY6º4p!ÀΙ<uôÈËËSVV– ìÆº»»ÛuêèÒ¥‹ `·-44Tµ.L-))ÑUW]u±§Ú$„††*44Tƒ–$?~\Û¶m³Ý¶nݪ÷Þ{O§NR³fÍÔ¾}{]y啺òÊ+ÕµkWµoß^rrr2x&Ð4;vL¿ÿþ»¶oß®­[·ê×_Õ¯¿þªãÇËÙÙYÑÑѺòÊ+uÛm·Ù~v‡……]6àÿ©‘JP‚fi–Fi”nÖÍzM¯)\áF—.B€&¢¤¤DGµ z¤¦¦Ú…>233UXXh7®jÈ#44T±±±¶`G}Bh¼½½¯øøx»íÖOš·~Úü;ï¼£Y³f©²²R®®® WÇŽ#‹ÅbwœŸÒÒReffjçÎJIIQjjªÝMúãçw»víÔ±cG9R±±±êÖ­›<== ®p6f™õoý[#4B5QÕY µP·é6£Kà2››«ÌÌL[¨#77WÊÉÉQff¦rrrtìØ1ÛþÍš5SPPÂÃì:¨oß¾ SË–-ª–-[*00PÍšñ¿ MÁ™D¤?>•~÷îÝÚµk—vïÞ­={öhõêÕÚ½{·Š‹‹%IþþþjÛ¶­:tè ¶mÛªM›6ŠŒŒTdd¤BBBŒš48………JKKÓ´ÿ~íÞ½ÛvËÈÈPEE…œœœ©víÚéŠ+®Ð°aÃÔ®];µmÛV‘‘‘2™LFOpú«¿vh‡fi–ÆiœVi•^Õ«2Ëlti ‘beÐÀUëæaýÄðììl¥§§ëĉ¶ýÏìè1hÐ Ûcë}«V­zà¬Z´h¡k®¹F×\sÝöŠŠ eddØ2ÿþûïÚ½{·Ö­[§ŒŒ •——Kúã½h ‡T½EEE)**J¡¡¡rrr2bjpÁ;vÌø°Þ[§¥¥)//϶oË–-Õ¶m[µoß^7Üpƒíqtt´ÜÜÜ 
œàbk®æú§þ©!¢;t‡:ª£ÞÒ[¨F—!V€*))±-ÍÌÌTzzºÒÓÓ•™™©ÌÌL¥¥¥éäÉ“¶ýÍf³ÂÃéèèhõéÓG­ZµRDD„ÂÃÃ&g„¦Àú ö‘‘‘êׯŸÝkååå¶÷nZZšöï߯´´4íÛ·Oß}÷222TZZ*IruuUxx¸¢¢¢n -………ÙƒY ÀP•••:xð rrr”••¥œœeggÛnÖÐÇñãÇmcBBBl¸T Ä5oÞÜÀ‚%h‡vh²&ë&ݤô€ž×ór uG ¸ˆJJJ”••e×Ñ£j‡´´4>}ZRõÎݺu“Åb±=ŽŽ–¯¯¯Á3j׬Y3Û‚gG***l ¨«~r~vv¶¶mÛ¦ÜÜ\ÖÇj­ÖF— !Ày8uê”RSS«-nOKKÓtèÐ!Û¾f³Ùöéà1114hÝâÑ-Z8àÒprrRxx¸ÂÃÃ_ã~yyyv!ªª÷Û·oWvv¶<¨ŠŠ Ûwww™Íf™Íf[¸ª¦çòññ¹Sp Ù~Väåå)//¯Æç‡Vyy¹m¬õgˆõçÅ•W^©*44ÔÐ “ŸŸŸ3\ŽFj¤º©›Fh„®ÒUzOïi ]h„ga]˜îèvàÀÛ‚t³ÙlëæÑµkWýéO’Åb‘ÅbQ›6mX@ ÔƒÙlVll¬bcckܧ¸¸X999ÊÍÍÕáÇuäÈåææêÈ‘#:|ø°>¬íÛ·Û^+))±ïíí­   ªE‹¶°HÕÇŽžóÉÿÀÅUQQ¡¼¼<;vÌâ¨úøÌçÖïñÇ«²²Òv'''( @jÙ²¥"##Õ£G¨eË–jÙ²¥F.€¡¢­Ÿõ³¦hŠnÒMš©™zFÏÈINF—0!hòJKK•––¦}ûö)55Õvo}|òäII’›››¢¢¢Ô¦MuèÐA7ß|³-ðѺukyzz< iqwwWëÖ­Õºuë:í_XX¨ƒÚ-·H¬ Ë8`·è¼¨¨¨Úqš7o^cXÄÇÇGÞÞÞòöö–Ùl¶=¶Þüüüäãã#ggç }9€£  @Ç·Ý •ŸŸo·íøñã5†> ªÓÙÙÙaX+,,ÌúhÙ²¥‚‚‚l€€99±hÐx¸Ë]oè õPM×tmÓ6½¯÷ÕBt—Ž@“púôi¥¥¥i÷îÝÚ³g~ÿýwíÙ³G{öìQzzºNŸ>-IjÑ¢…Ú´i#‹Å¢AƒÙº{X,…‡‡³°hÄ|||äã㣶mÛÖyLQQQ] ª>?tè~ÿý÷j à«v+¨ªyóæ¶ˆ¯¯¯|}}í‚#>>>òôô”«««Ìf³\]]åéé)///¹ººÊÏÏOîîîòðð\]]åããs¡.šˆ¢¢"«  @¥¥¥:~ü¸N:¥’’åçç«´´T'NœÐÉ“'UZZª¼¼<Û:ª¾ç?î0Ìaåååe÷>·†:¢££ÏÚ¡‡÷7 )™¨‰ŠU¬þ¬?ëZ]«/õ¥¢mtY "€ËJVV–öìÙc ~Xé©©*--•$¨]»vjß¾½úôé£èèh[ðÃÏÏÏàhH<<<äáá¡ÐÐÐsâĉj- ·vT(,,TVV– íæ—””غ­^wwwùúúÊÕÕUÞÞÞjÞ¼¹ÜÜÜl'''ùúúJ’­[‰uœ³³³mὯ¯¯œœœlã]\\äåå%Iòóó“Éd’$»íÒÝ”š7o~N׫)(++Ó‰'lÏKKKí¾¶'NœPYY™JJJtêÔ)UTTØBÇWyy¹Š‹‹UTT¤Ó§O«°°PÒ]9***l!òòr?~Ü6Þ:¦°°P¥¥¥¶qµqDòðð°:,‹]WœÚNUß3àì®ÒUúY?kˆ†¨§zêS}ªÿÓÿ]h`„ Ñ9uê”víÚ¥]»v)%%Åüسgm‘­···Ú¶m«víÚiäÈ‘j×®Úµk§¶mÛÊl6<M…———¼¼¼rAŽwâÄ •––*??ßnIIIµîÖ‰5``}­¤¤D©©©’dëbbíQ5œ——wAj¶Q¬¬+k'Gj{Í‘ú„¬×¥®j»U_«àdÚ8_Öëa j˜L&[ÑËËK...¶@Ž«««,‹$Uë2ãææ&___».3nnnv"/XÁZ¯õº]·«¿úk±ëVÝjtY !€+??_»víÒÎ5<Æh IDAT;máß~ûMiiiª¨¨‹‹‹¢££Õ¾}{%$$hÒ¤I¶È…Z| ‰µG‹-.Ù9ÏìXQYY©üü|ÛëÖ`Š•5xbe ›XY»\XêôéÓÕÎ{f°âl¬õÕ•ŸŸŸ¼½½ë¼¿µ‹Š#ÖN+U]5LQ5ˆXµC‹$»®,’ÎÚ‘4-îr×R-ÕÃzXc5VÊÐ#zÄè²@A †ËËËÓÎ;•’’¢ÔÔTÛãýû÷«²²R®®®ŠŽŽVLLŒF¥Ž;*&&F1115.Î\ÖPŒá,g½¨e‘EÓ5]yÊÓ?ô£Ë \2Ú±c‡vìØ¡_ýÕöøØ±c’þøõ:(&&F½{÷VLLŒ:tè ¨¨(>Ф= ¨@Ó8ªP¯è™ÄßÎhÊ„à‚+++Ó®]»ì‚Û·oWZZš$É××W:uRçÎ5zôhuèÐA;vTpp°Á•ÐpÝ¢[ä,gݦÛT®rýKÿ’“œŒ. 
„@ÎKZZš-ða ìÚµKeeerqqQ‡Ô©S'Mœ8щŒŒ4ºl¥‘)7¹i”F©LezSo  ‰"€:ËÎÎVrr²íöóÏ?ëðáÃ’¤ÅÄĨoß¾zøá‡£Ž;ÊÃÃÃસ¼ Ñ}¦Ï4\ÃÕ\ÍõŠ^1º$`!¨¦´´T;vìЖ-[´eËmÞ¼Y¿þú«Nž<)ÅÄĨ[·nzòÉ'ÕµkWuîÜY¾¾¾F— @“1Pµ\Ë5B#¤ =©'. \bBš¸'Nè×_ÕæÍ›m;v¨¬¬LžžžêÒ¥‹ºu릻îºKÝ»wW§NäêêjtÙ4yC4DoêMÝ¡;ä'?MÕT£K—€&äôéÓÚµk—’““m·_~ùE¥¥¥òññQçÎuÝu×iúôéŠU‡äììltÙ ã5^ÊЃzP! ÑH4º$p‰¸ŒíÛ·O›6mÒ¦M›ôË/¿hóæÍ***’···bcc§|PÝ»wWëÖ­.–¥K¥ŒŒÿ=ÿõ×?îŸ{Î~¿„)6öÒÕœáq=®ƒ:¨ñ/‹,Šÿ} @S@ à2QPP íÛ·kÆ JJJÒ¦M›tèÐ!5kÖLíÚµSll¬FŽ©øøxuíÚ•Îp6S§JùùR³*Jwu•fÏþßó’iòd!0Ü-Ð^íÕp W²’¨@£K€F¨¢¢B)))JJJÒ† ´qãF¥¦¦J’¢££uÕUWéÑGÕÕW_­nݺÉÃÃÃàŠ =Zzýõ?BµùóŸ/M=@-œä¤÷õ¾z¨‡Fk´¾Ö×jƲ.küŸ?@#PTT¤ÿþ÷¿úᇴqãFmذAùùùòòòÒ5×\£qãÆ©gÏžºúê«åïïot¹py3FzõÕÚ÷ ”®»îÒÔœ…Yf}¢OÔK½4K³4OóŒ. \DB ÂÂBmÚ´ÉÖ$))IÅÅÅ V=4kÖ,ÅÅÅéꫯ–«««ÑåÀå).N •²³¿îê*/9;_Úº€Z\©+õ/ýK·ëvÅ+^Ã4Ìè’ÀEB  HOO×Úµkµ~ýzmܸQ»ví’“““:vì¨øøxÝ~û튋‹SëÖ­.š“I;VzñE©¬¬ú륥t˜q§µZ«‰š¨xÅ+@F—.!ÈÉÉÑÚµkm·}ûöÉÝÝ]W]u•†®¸¸8õêÕKf³ÙèR i3FzþyǯEFJ±±—¶ Žh¾Õ·ºW÷êS}jt9à" p 9rD?þø£6lØ ÄÄDmÞ¼YNNNêÚµ«þüç?+!!Aqqqòðð0ºT@U]»JmÛJ{öØowu•î¸Ã’€ºð‘k±ú©Ÿ–i™nÑ-F—.0!Á‰'ôÓO?)11Q‰‰‰Ú²e‹L&“ºvíª¸¸8=òÈ#êß¿¿|}}.p6ãÆIsçJeeÿÛVZ*m\M@Ü 4Q5ESÔW}¤ £K€  ¼¼\?ýô“Ö¬Y£ÿüç?Ú²e‹*++Õ¥KõíÛW³gÏVïÞ½åããct©€úºõViöìÿ=7™¤Î¥+®0®& Žž×óZ¥UzBOh‘]¸€„œ£¬¬,­Y³FkÖ¬Qbb¢òóóe±X4`À=úè£êÝ»·üýý.p¾Ú´‘ºv•¶m“**¤fͤñã® ¨/yé=£Ûu»&j¢º«»Ñ%€ „@@Y»€¬ZµJ‰‰‰Ú¼y³ÜÝݧY³f)!!A±±±F— ¸Æ—fÌø#R^.mtE@ݦÛôš^ÓÃzXßé;£Ë€Zìß¿_ß|óõõ×_«  @‹E zä‘G4pà@y{{]&àb5JúË_þx|íµRx¸±õõ`’Ió4OqŠÓ*­Ò 2º$p¨¢²²Rÿýïõé§ŸjåÊ•JII‘———®¿þz=ûì³8p Z·nmt™€K-4TŠ—Ö¯—n¿Ýèj€z»V×êÏú³ÓcºY7Ë$“Ñ%€óD 4yeeeúþûïõÙgŸiåÊ•ÊÊÊRTT”†ª—_~Yñññruu5ºLÀ%TPP ¬¬,eee);;[™™™ŠtqÑhggÍݶM-,PHHˆÂ¦ÐÐP¹¹¹]6P«'õ¤º¨‹ÖhnÔF—ÎÐ$:uJß~û­>úè#}ñÅÊÏÏWÇŽ5vìX 4Hqqq2™ød,¸Ü”••éàÁƒÊÈÈPNNŽ233íB999ÊÈÈЩS§lcÜÝݦðÐP­íÛWÛùEYŸ}¦ÜÜ\UTTØö THHˆ"""¢ððp………Ùm 4bÚ€$©“:i€hžæà2@ 4GÕ—_~©U«V髯¾RQQ‘®½öZÍš5KÆ Sûöí.pŠŠŠ”““£ììlåää(55ÕöØzŸ––¦Ó§OÛÆ˜Íf…„„(44TêÙ³§í¹ÅbQHHˆ‚ƒƒåäääðœyyy5žoË–-JMMU^^žmWWWùûû+44´Úy¬ÛZµj%ooï‹~½Ð4=¬‡• mÒ&]­«.œ!à²vôèQ}ôÑGúàƒ”””$777õïß_/¿ü²¬€€£KœEii©Ž9bì83„‘••¥‚‚Û777µhѸˆµ a„„„(**JžžžçU›Ùl–ÙlVLLLûœT©ZûÎ;•˜˜¨ôôt•——ÛÆ¸»»; ‹T½ŒŒ”³³óyÕ€¦çÝ îê®Z ¥Zjt9à<—'NhåÊ•úàƒôõ×_ËÅÅEC† 
ÑŠ+4`À5oÞÜèÿŸ5,ᨛG]ºz„„„(66¶Zp"$$D&“ÉÀ™ý‡‡‡,‹,K­ûÕÖm$%%ÅöÜÊQ·‘3»ŽDDDÈÇÇçbOÌTMÕ}ºOyÊ“Yf£Ëçˆ@¸,”””è믿ÖG}¤O?ýTEEEêÛ·¯Þxã >\ÞÞÞF—MJ]ºzdffª°°Ð6ÆÚÃj8³«‡ÅbQDD„\\\ œÙÅS—n#ÅÅÅÊÎÎvžIMMUbb¢222TVVfsæuuÔu„n#MËŸô'ݯûõ‰>ÑݺÛèrÀ9"­ŠŠ mܸQ}ô‘>øà=zT×^{­ž~úi3F-[¶4ºD¸,Õ¥«ÇTQQaÓØºz4Tîîîõî6rf'11QÙÙÙÊÍÍUee¥mŒÙlv±n —¯¯ïÅž".oyëfݬô1! ÑÙ±c‡/^¬eË–)''G±±±zä‘G4zôh…‡‡]4Z%%%:zô¨Ãu[FF†Ž?nÓÔ»z4Tuí6rìØ1‡_çóé6ÒªU+5kÆ??4t·èÒ(e)Ka 3ºpøÐ(?~\Ë—/×›o¾©Ÿ~úImÚ´Ñ„ 4f̵oßÞèò ÁsÔ1âÌÐÇÁƒkìêa±Xg·ðßzÆÉî°yjr!»XïÛ´i#??¿K1MÔàfÝ,/yéc}¬©šjt9à Zrr²-Z¤¥K—ª¬¬LC† Ñܹsuà 7Èd2]®jW3»­¶mÛjÊ”)ºýöÛ«Õy®êÒmDú#ÔVS˜-%%E{÷î­w·‘ÈÈHyyy]y€¦£ÚI’vk7SeCù!h$L2i¹–k”F] Ðà:tH¯¾úª^|ñE¹ººjòäÉš6mšÌf³Ñ¥hd-Z¤ &]¸j[““£}ûö)??ß¶ÕÐg~Z¾u[«V­äíím଻Ôá‰Kq>£!ÃÅšKC¹F ¥«ªõ)77W›6mÒ›o¾©œœ}òÉ'jÛ¶­Ý˜ÿüç?zúé§õøã«W¯^ruuUNNŽV­Z¥'žxB¶cΙ3G¿üò‹ž}öYµk×NÅÅÅÚ²e‹zè!mݺնŸÉdR—.]ôÆo¨S§NÚ³g&Ož¬þýûë‰'ž¸´¥jë6RS@Žn#hh’““%©ÖÎ:ã™eÖ?ôݧûŒ.8`2™´|ùre¿~™@Ô ºÜÜ\=óÌ3zýõ×e6›õ—¿üE'N”§§§Ñ¥h¤„Ðø°h¹:! K«¶z-Z¤gŸ}V[·n•¯¯¯$)33SÆ ÓÚµk¾Õ¯_?Û1ƒ‚‚´}ûvµlÙÒn¿””ÅÄÄØB¶mÛfë,"ýÑ…$..N¹¹¹d®FhJa;4>B q°È¢‰š¨™šit)Àš!Í ª\Ž=ªçŸ^ .”ÙlÖ¼yót÷ÝwËÝÝÝèÒ\@5-4¶nËÊÊRAAmÿª -‹bcc«…>"##åååeà¬.½‚‚Í™3G+W®Tvv¶Ìf³  I“&éꫯ®uì¶mÛ4cÆ mذAÎÎΊ‹‹ÓôéÓ5`À€ǘL&Iª¶ßÑöºÔfg½—¤»ï¾[o¼ñ†$iÇŽzä‘G´~ýzIR\\œæÍ›§N:U;÷Þ½{5cÆ ­]»Ö¶H½¦°€£z­ÛÒÓÓ5eÊ}÷ÝwjÞ¼¹úõë§—^zIþþþuïh.5]·Ï?ÿ\Ï=÷œ’““¬áÇëoûÛYÌ×vþ3¹¹¹©¸¸XRÝ®§$íܹS3fÌÐúõëåìì¬ë¯¿^/½ôR­5UµcÇÍœ9S?üðƒ$éºë®ÓóÏ?ïðëV—ë}.&L˜ äädÍ›7OsçΕ$ÍŸ?_÷ß×7!!Áîšž:uÊ&©ªcÇŽvû9zŸùûû«¤¤ä¼æ`4³Ù¬ØØØZÜŸÜ«ú³|çÎJLL¬Üswww©zÕhƒ{à¼ä¥:at ž„€z+,,Ô‹/¾¨ùóçËÍÍMsçÎÕý÷ß/£KPuéê‘––¦Ó§OÛÆœÙÕ#66¶ÚBá¼7e·ß~»ºuë¦üQ~~~JIIÑ< ž={ÖÚÉa÷îÝ=z´.\¨^½zéÀš8q¢Xë¸ÊÊJ‡_GÛëR›uœ£sîÝ»WýúõÓìÙ³õöÛoËÉÉI«W¯ÖСCµnÝ:…‡‡Ûãþûïל9sôþûïëûï¿×M7ÝT¯yX·ÝsÏ=zðÁõþûï+//OÓ¦MÓÃ?¬·Þz«NãÍ¥¦ë6tèPýûßÿÖ7ß|£ÂÂB=òÈ#zàôöÛo×X{mÇ«zîÒÒRÅÇÇë®»î’T÷ë¹oß>Ýxãš3gŽm¿5kÖè–[n©µ&«½{÷êÆoÔSO=e›Ç矮êûï¿Wtt´Ýêr½ÏÕ½÷Þ«»îºËY³fxà:[½ÖmŸ}ö™†jÛöûï¿ëúë¯WVVVÆ×Ö•äl:ÏËËStt´Ž=z^ç’¤I“&)??_K—.•T÷ë9nÜ8õèÑCÓ¦M³Ûïí·ßÖwÞyÖ9Œ;VW]uUµñóçÏ×–-[ôî»ïÚÍ¡®×Û‘³]ƒ“'O*((H'Nüñ©ÔÍ›7W~~¾\]]Ïzlé!?þ¸–,Y¢ž={jРA:t¨ÂÂÂž®uëÖéå—_ÖþýûõÓO?©M›6u:¤ââbeggWûSõwMFF†ÊÊÊlcêÒm$22RÎÎÎÎ Crr²$ÕÚÁ`¼ Vj¥×õºÑ¥L&“–/_®Q£ì×/€z"‚¦jåÊ•š5k–RSS5eÊ=þøãv …àB"@ͬ]=uó¨kWG 
réêqá¹¾ÿþ:uê”þú׿*!!AÍšÕ­spp°6oÞ¬ÐÐÐz¯¦mçS[MÇ ÖÏ?ÿ¬ÈÈH»íTß¾}•’’bwŒ“'OªyóæµÎ§.s;vì˜]7ˆ’’yxx¨¢¢¢NãÏ'RŸãÖv¼>ø@ûÛßôË/¿ÈËËKRݯgMûåææ*$$ä¬s¨iüþýûÕ«W/».õ¹ÞŽœíšž8qBÁÁÁv¼¼<¹¹¹õØUhõêÕúꫯ´zõjõïß_¯¾újµ³0™LŠŒŒÔˆ#4cÆ ×ë<¨›ªÝFjê:’››k{oÔ¥ÛHxx8NÒÈ€Æa„FÈE.Z¦eF—¨)R·qMÖÖ­[5}út­_¿^#GŽÔ—_~)‹ÅbtY@“1}útmÞ¼YÙÙÙÊÊÊRqq±í5OOOEDDØÈÆÇÇ+44Tááá QDD„‚‚‚äââbà `õÑGé©§žÒäÉ“uøðauíÚU#FŒÐ¤I“jý9rD ²6«£G***ÊákÕ¶Õ' R›ªáIrss«S£¾>¬G}TkÖ¬Qnn®]àê|ìÚµKÓ§O×·ß~k ƒHu¿žGŽqd¨k¸¡¦ñ!!!:räHµíóz§¤¤ØuèhÕª•²²²êý7___ÝrË-ºå–[tòäI?^S§NÕ;ï¼SmßœoÙ8 ³Ù,³Ù¬˜˜˜÷9uê”222”““£ÌÌLÛﻬ¬,íØ±C«W¯Vnn®]·kG«ððpÝyç=zô¥˜—5g9«Bgú€†ÅÉè@ÔŸŸ¯x@=zôPii©6nܨåË—€säëë«ùóçkß¾}JMMÕäÉ“µlÙ23¦Öqþþþç×…ÉdRII‰Ý¶cÇŽ]°Ú¬tìØ1UVVV»:uêœjoHÆ'OOO%%%©¸¸Ø6·óqòäI1BÏ=÷œ:uêd÷Z]¯g@@€rss«ûèÑ£uª! À® ˆUNNÎE!éßÿþ·† f{>pà@}óÍ7çuLOOO½øâ‹úì³Ïη<€‰!ÀNee¥Þ}÷]Íœ9Seeezá…ôÀÈɉ)`„ Ø=/**RNNŽRSS•­œœÛ}RR’rrr”––f×ÁÀl6+$$D¡¡¡¶{‹Åb·-$$D&“éRO¯I1™LÊÈÈPxx¸4zôhõëׯÆNV}úôQbb¢ÆoÛ¶mÛ63F)))µŽ VzzºÚ¶mkÛ¶~ýús®­¦÷È€ôý÷ßkøðávÛøáM›6M›7o®µN#Ôçý¾qãF-[¶L~~~¶mçt™8q¢zöì©;î¸Ã®¦ÊÊÊ:_Ïþýûë³Ï>Ó´iÓìöûöÛoëTCBB‚>ùä=ôÐCvÛ?ýôS%$$œÃ¬ÎÍk¯½¦o¿ýVÛ¶m³m{衇4hÐ =Úîº[}òÉ'Z¸p¡¾ûî;I\»ýû÷W{Ï6kÖÌ®ûŠÕÅè$ƒêòòòªý®:ó÷Wnn®íëáêê*Ûï¦N:©_¿~v¿·ÂÃÃåëëkðÌh„›””M˜0A?ýô“î¿ÿ~Í;×ᢠÆñððÅb©µ{_ii©Ž9b··êBÜäädeffª°°Ð6ÆÝóGa, IDATÝ]f³ÙaXÄz%OOÏK1ÍËÖ=÷Ü£^xAÑÑÑÊÏÏ×K/½tÖ…÷ýë_5|øp…††êÚk¯Õ¾}ût×]wé8ëùúõë§Ç{L/¾ø¢üüü´aÃýë_ÿ:çÚÂÂÂôã?ªGZ»v­î¾ûneddhΜ92dˆNŸ>­¾}ûÊÕÕUëÖ­ÓĉõÊ+¯Ôý]B5ÍÅ‘^½ziæÌ™zì±ÇªôôtÍ;÷œÏýïÿ[Û¶mÓÏ?ÿìðõº^Ï9sæ¨OŸ>òööÖàÁƒåìì¬ÄÄD=óÌ3uªcöìÙêÛ·¯|||4dÈ™L&}þùçš?¾¾ÿþûsžßÙ+77W?ýô“Þxã åææê›o¾±[äߪU+=óÌ3JHHÐO<¡Þ½{ËÃÃCZ¾|¹^{í5­^½Úî¸ãÆÓóÏ?¯N:ÉÙÙY¿ýö›yäÝwß}vûÅÅÅÉd2)))é¢ÍñrW\\¬ìììj¿cªþ®ÉÈÈPYY™mŒ»»»Ý˜˜j¿k"##åììlàÌh\„•––êÙgŸÕ³Ï>«.]ºè¿ÿý¯ºvíjtYΑ«««BCCªØØØ÷³vqô©íÉÉÉZµjÝFΑuþÖŽ’”˜˜¨… ªwïÞ:~ü¸ÂÂÂ4lØ0½óÎ;µ«sçÎz÷Ýw5cÆ %''+00P“&MÒý÷ß_ëù$é…^дiÓÔ½{w:uJ×_½^ýuµjÕêœj›?¾n»í6eee©U«V¶pBTT”>þøc=ú裺çž{T^^®Ž;êå—_ÖСCÖ)Õ­Kƒ£¹Õ4ßúì[Ó\í¿dÉ=üðÃêÙ³§òóóÕ®];=ñÄZ²dÉYÏåhÛ´iÓTRR¢æÍ›;œs]¯§ÅbÑêÕ«5cÆ M:U&“I½zõÒÒ¥KSmÎgjÛ¶­V¯^­™3gjúô钤뮻N«W¯Vttt­×µ¶íg:óëîææ&uëÖM·Ýv›n½õV¹¹¹U7hÐ Y,ýýïׄ TPP õë×OëÖ­S›6mlûþøãZ¼x±Æ¯ôôt9;;«C‡ºë®»4yòd»ãVTTÐ 
µU»z8êFe½·:³«‡ÅbQ\\œÝˆùøø8+.O¦Jú¢@½˜dÒr-×(2ºà‚øñÇuï½÷êÀš;w®¦NÊ'10Ü¢E‹4a£Ëÿ_^^žÃEÁÖmYYY*((°íïææ¦-ZÔÚm$22R^^^Î š–3C€ŽBééé*//·9³«Ç™?ËCCCEÀµJNN–¤ZCªãYÿíóC}hp%À“ɤåË—kÔ(ûõËt ‰*++Óßÿþw=ýôÓŠ×'Ÿ|¢víÚ]€Èl6+66ö¼»œ¹ÐØQ·‘3»Ž³Ð΢¦àžõ~ß¾}ÊÏÏ·í_5¸¢˜˜ <Øîgq«V­äíímà¬ÀÙ  Ú¹s§ÆŽ«½{÷êµ×^Ó½÷ÞktI9Y,Y,–Z÷«mÑrJJÊY-;ú¤z-¸\Õ¶«©«Ç™a»˜˜7ް—!!4!•••z饗ô裪[·nÚºu«Ú´ictYšsé6Ru!ôÎ;•˜˜Xm´»»»Ã°HÕû¨¨(@h0ÎÖÕcïÞ½*((°í_5 g±X[íg]dd¤¼¼¼ œ¸”„ÐD;vLwÜq‡Ö¬Y£'Ÿ|R>ú¨œ. ª©O·‘š>5?%%ÅöÜÊÕÕUþþþµv‰ˆˆÏÅž"€ÿÇÞ½‡GYßùÿN"$’ptÛ5¶"¡ ruÀzµ\mQ»µ´Õ*«®¢ßï¶Z»þ„µkUöÛ֭ʵmѵ«ˆZKZEÀb%(­ÐZK€œ9†s8$Ìï:Ó&' ¹“ð|pq Üsßs¿?sºoÈçu¿;±ætõØ´iÕÕÕñmìê!I’$I’$I’Ž…I’NùùùLŸ>C‡ñÖ[o1vìØ K’$I’¤ã–‘‘AFF999 ®sàÀJKK^¿  €¼¼<ŠŠŠ8|øp|›æt2dˆ!{é$sèÐ!¶mÛÖ`£ÒÒRJJJZÜÕcèСtïÞ=À‘I’$I’$I’¤ŽÊ@ˆ$IÜO<ÁÝwßÍE]Ä‚ èÛ·oÐ%I’$IR›IMM=aÝFÊËˉF£@ân#u»Ž 8ž={¶Å0%§XWDÝ<šÛÕ#77·^,++‹P(àÈ$I’$I’$IRgf D’¤NêСC|ó›ßä¹çžã»ßý.÷ÝwIIIA—%I’$IíRkw©±ÛˆÔ6uõ¨ú(..f÷îÝñmê~nëvõ‡Ã 4ˆ.]º82I’$I’$I’$!’$uJÛ·oçšk®aÕªU¼ôÒK\yå•A—$I’$IÞ±t©;=//¯^·8HI‰Ý>œ^½zµö¥¥9]=6nÜÈ‘#GâÛÔìê‡;vl½Ïœ]=$I’$I’$IRGa D’¤NfíÚµL™2€ßýîwœuÖYW$I’$I'—æt9xð Û·oO8™½  €åË—³iÓ&öíÛߦnׂD]GÌ)§øß¾êØj~>êêQTTÄž={âÛ4ÔÕ£æçî’$I’$I’$©³ñ'ƒ’$u"ï¾û.S¦LáÌ3Ïä—¿ü%§vZÐ%I’$I’HII!;;;>q½!‰ºÄnW¬XAii)›7o®×¡±n#áp˜ŒŒŒ¶¦TOcïéXè#Ñ{º©®ÙÙÙŽJ’$I’$I’$)B$Iê$òòò¸ú꫹ð Y¸p!iiiA—$I’$I:N'ªÛHaa!{÷îoc·h½c·M½cA߇’$I’$I’$IÍãOQ$Iê,XÀW¿úUf̘Á“O>éD I’$I:‰´v·‘Da;3œ\½wê†>ÊËˉF£ñmjvªIÔÕcøðáôêÕ+ÀQI’$I’$I’$u|Ε$©ƒ{î¹çøçþgî¾ûn~øaB¡PÐ%I’$I’Ú¡æt9tèÛ¶mK8ñ¿  €·ÿô6Eʨ[ YÀ@HMIÜm¤fdРAtéÒ¥í«f9pà;vìh°«GAAEEE>|8¾MÝ®‘H¤ÞknWI’$I’$I’¤¶áOd$IêÀ^xán¾ùfî¼óNæÌ™t9’$I’¤®k×®µºìboó6yäñ>ïóþB*©L<2‘Q;GqQÞEl-ÞZ+DŸŸÏâŋٸqcƒÝFê:’••å…NÙÕ#¶làÀôìÙ3ÀQI’$I’$I’$©&!’$uP?ÿùÏ™1cwÝusçÎ ºI’$IR'PEkXCÞß~½Ã;âaÂDˆð òy>OzR:ô.jø±u©LÈÏϧ¸¸˜Ý»wÇ·©Û}Ân#õ8p€ÒÒÒzÏéñvõ2dÉÉÉŽL’$I’$I’$I-e D’¤èÍ7ßäÆodÖ¬Y†A$I’$IÇ¥€‚xä×üš]ì"‹,Æ1ŽyÌã ® ›ì?nÝn# ©¬¬Lؽ¢f·‘M›6Q]]ߦ³v©ÙÕ#Ñó»éÚµ+}úô‰9QWAƒÑ£GG%I’$I’$I’¤Öb D’¤æü#×^{-×\s ßÿþ÷ƒ.G’$I’ÔÁ”SÎ2–‘G¯ó:ÅÓ‡>\ÄE|—ï2ŽqäÒp€ãDKKK#‡\§9ÝFJJJصkW|›””z÷î0,»:t(Ý»woõ1ÆB/5kß½~=,//+,,¤ªª*¾M¬«G¬öœœ»zH’$I’$I’$©!’$u ÅÅÅ\vÙeŒ9’Ÿþô§îJ§’$I’¤¶·—½¬de¼ H>ù¤‘ÆXÆò-¾E„çpI$]jƒZÚm$Q—–t©Ûu$33“¤¤ÄÏOS]= ¨¨¨¨5–Ü^½ÈÛ¾ÇGŒ #7—)S¦ÔÚ÷àÁƒIOO?qO $I’$I’$I’:%!’$uû÷ïçŠ+® 
OŸ>¼üòˤ¤¤]’$I’$©ª¢Š5¬‰@–²”jª9‡sˆasÇ8RI ºÔ®9ÝF>ÌæÍ›)**¢¬¬ŒââbJKK)--¥¨¨ˆ÷ߟÂÂBöïßß&55•M¯^½(//§¤¤„òòrŽ9_¯_¿~dff2hÐ ²³³5j ++‹Aƒ‘••Åi§vtåÛnã¾ùóá¿þ FnµçD’$I’$I’$I—I’:ˆ[o½•ÂÂBòóóéÑ£GÐåH’$I’Ú‘ â7y“Ýì&L˜f2“‹¸ˆ>ô ºÌv¡K—. 86ºÞ®]»())¡¤¤„ÒÒRŠ‹‹)++£¢¢‚3Î8#ð0`@<,Ò¢‹7<ölÚS¦ÀÊ•0|øqŽL’$I’$I’$I'!’$u?üáyþùçY¼x1Æ ºI’$IRÀÊ(c9ËÉ#Å,¦”RúÒ— ¹Gx„ILbþûñxôìÙ“ž={ræ™g¶Î’“á¿ÿƇ©SaÅ èÕ«uö%I’$I’$I’¤NÉ@ˆ$IíÜ|ÀwÜÁ·¿ým.½ôÒ Ë‘$I’$`{x÷â]@òÉ'4Æ2–Û¹Îá’H ºTµDz:¼þ:œw\{-¼öœâÛK’$I’$I’$©yüÉ’$IíØÁƒ¹á†3f ßùÎw‚.G’$I’ÔFª¨b kâ·y›(QF0‚æ0‡qŒ#•Ô KÕñÊΆW^ à߀gž º"I’$I’$I’$uB$IjǾóï°aÃ^~ùe’’¼Ê«$I’$uVG8Â|€,g98@˜0"Ìd&s1½ét©j #GÂÂ…på•ðÿwÝtE’$I’$I’$Iê „H’ÔNýîw¿ã?ÿó?ùñÌðáÃ.G’$I’t‚P€ü–ß²íœÆiLd"ó8“™ÌP†]¦ÚÊå—ÃܹpÏ=ÃÕW]‘$I’$I’$I’Ú9!’$µCÕÕÕÜzë­\|ñÅÜ|óÍA—#I’$I:¶±·x‹<òXÂ6°ntã|Îçnî&B„‘Œ$D(èR”»î‚‚øÒ—à­·à¼ó‚®H’$I’$I’$Ií˜I’Ú¡gžy†?þñ|øá‡A—"I’$I:F•T²‚ñ. ð!BŒ`Ó™N„ãO )A—ªöä‰' °®º Þ{º"I’$I’$I’$µSB$Ijg***øö·¿Ím·ÝFNNNÐåH’$I’š©šj>äÃxd9Ë9À„‰a6³‰!ƒŒ KU{–œ ?ûŒ—^ ï¾ ={]•$I’$I’$I’Ú!!’$µ3<òßùÎw®D’$I’Ô” âßðv°ƒ~ôã.àqçó|žÁØáA-”ž¯¿ç×^ ‹Ã)þw¾$I’$I’$I’jó'H’$µ#»víâ‡?ü!÷Ýw½zõ ºI’$IR[ÙÊÛ¼Myüš_³‘t§;cÃ=ÜC„#IˆPÐ¥ª£0^y.¸¾ùMxúé +’$I’$I’$IR;c D’¤vä?øÉÉÉÜrË-A—"I’$Iö³Ÿwy7Þä> DˆŒàZ®%B„ L +]ƒ.UQn.<ÿäÃxdË8ÈA„‰a6³™Ìdzâdzu0=zÀ¢E0z4\wÝÑ?ŸâóK’$I’$I’$¬üI‘$IíÀ+¯¼ÂÕW_M( ºI’$Iê (ˆ@–°„ì$“LÆ3ž'x‚˸Œ ºLéø  ‹ÃÀ­·Â“O]‘$I’$I’$I’b D’¤€ýõ¯åã?æ©§ž ºI’$Iê06³™wx‡<òø¿¢BNåTF3š{¹—F2’ïÕ Ï=×^ g·ÝtE’$I’$I’$I €I’¶hÑ"úôéÃùçŸt)’$I’Ôníc¿ãwñ. 
«YM2ÉœÍÙ\ÇuDˆpÐ….A—*µk®ÿw¸ã2¦N º"I’$I’$I’$µ1!’$léÒ¥\tÑEœrЇeI’$IŠ©¦šù0y‡w8Ä!„‰a6³¹„KèA K•‚sß}°i|éK°lŒtE’$I’$I’$IjCÎ<•$)`ï¿ÿ>wÝuWÐeH’$IRà (ˆ@~ͯÙÅ.²Èbã˜Ç<.çr0 è2¥öeÞO:éA—*u|ÆÁâÅ0q"Ü}7<úhÐI’$I’$I’$©•‘$)@7ndذaA—!I’$I'LñÈ›¼ÉnvÇ 3™ÉE\Dú]¦Ô9}îsðì³G»„ ·ÞtE’$I’$I’$IjEB$I І :thÐeH’$IÒ1+£Œå,'<^ã5J(¡/}¹ y„Gˆ!L8è2¥“Ç´iðç?ìY0x0L™tE’$I’$I’$Ij%B$I Pqq1cÆŒ º I’$Ij¶½ìe%+ã]@òÉ'4Æ2–Û¸Îá’H ºTéäõoÿý+|éK°lœ}vÐI’$I’$I’$©øSYI’´gÏzôèt’$I’Ô *ªÈ'Ÿ¹Ìe“èMo.áòÈ#B„%,a;XÂf3›\rãaÑ£GóÚk¯%|Ü5kÖ0pà@ªªªXºt)çž{.©©© :”ùóç×ZÏž=ÜsÏ=œqÆtëÖž={2iÒ$/^ܺO€Ô…BðÌ3ð¹ÏÁå—Cqq37 Q]]Í¿ÿû¿3tèPRRRø‡øþë¿þ«Þº¯¾ú*£G¦{÷îtïÞ½ÑÏ»$I’$I’$I’Z‡I’´oß>ºuët’$I’TK<ÅSLg:½éÍ(FñO&Ìóßle+«XÅæ!B*© gÖ¬Y<öØc ï›7oßøÆ78å”Søøã¹æšk¸ãŽ;زe /¼ð=ôo¿ýv|ýo¼‘ªª*òòòصk6l`Ö¬YÌ›7¯5ž©ãëÒ^|N=®¼öíkÖf·Ür •••äåå±cÇž~úi~ðƒ°páÂø:+W®ä¦›nâöÛogãÆlذo}ë[̘1ƒ÷ß¿µF$I’$I’$I’¤:N ºI’Nfû÷ï7"I’$)pe”±œåä‘ÇbSJ)§q™È÷ù>“™ÌP†¶øq¯¹æî¾ûnÖ®]KNNN|ùöíÛy饗øøãx衇¸÷Þ{¹îºë8÷ÜsùÁ~À#<ÂĉX²d %%%ñ.‹½{÷æŠ+®àŠ+®8¾ÁKYïÞðúë0z4\{-¼ò $'7ºIÿþýyðÁã¿à‚ ˜7o>ø ×^{-sçÎå{ßû×_}|½/ùËìܹ“9sæðÒK/µÎx$I’$I’$I’T‹B$I ÐÁƒIMM|%]I’$Ij-ÛÙΫ¼Ê½ÜË(F‘M67qp;·³ŠU”SÎ ¼ÀLfS K—.ÜrË-õº„<óÌ3L™2…~ýúðî»ï2uêÔZëL˜05kÖÄÿ~æ™gò¯ÿú¯”””S-ÒI+†_þòòàÞ{›\ý¦›nª·l̘1üå/‰ÿ}õêÕL™2¥ÞzS§N%??ÿ¸Ê•$I’$I’$IRóÙ!D’¤E£Ñ K$I’t¨¤’¬ ïo¿>àB„Á"D˜ÃÆ3žRNø¾gΜɧ>õ)~øaúöíKuu5?üáyñÅãëñÿðõ¶MJúûõl~ñ‹_pçwrúé§3tèPFÍUW]ÅÔ©S …B'¼n©S7ž{®»† ƒ[nipÕ¡C‡Ö[–‘‘AEEEüï›7oŽºjêß¿?ååå'¤dI’$I’$I’$5Í@ˆ$I’$I’ÔÉáðA<²œåàaÂDˆ0›ÙDˆAF«×Ò·o_¾ð…/ðä“Oòÿïÿå•W^!33“sÏ=7¾N=øÃþ@VVVƒ3lØ0þ÷ÿ—ƒòç?ÿ™÷Þ{‡zˆE‹1þüV‡Ôá]{-üéOpûí0x0\qEÂÕj±Ò¿¶lÙ Aƒj-ß²e ™™™'¤\I’$I’$I’$5­éŸìH’$I’$Ij÷ (à)žb:ÓéG?F1ŠGy” 2xœÇÙÈFÖ³ž'y’iLk“0H̬Y³øÑ~ÄáÇ™7o·Ýv[­û/¼ðB^yå•f=VJJ gŸ}63gÎä7Þ`áÂ…­Q²Ô9Ý?\ýÑßøÃ1?ÌÈ‘#yõÕWë-_´h#GŽ<ž %I’$I’$I’Ôv‘$I’$I’: mlã-Þ"<–°„ l Ý8Ÿó¹›»‰a$#  ºTÎ:ë,>ýéOsÿý÷ó§?ý‰éӧ׺ÿþûïgÒ¤I¤¥¥qÅWššÊÊ•+yôÑGyíµ×˜0a3gÎdâĉdff²mÛ6æÍ›Ç„ ‚’Ô1…BðÌ3P\ S§Â{ïAÿþ-~˜»ï¾›+®¸‚^½z1iÒ$–,YÂw¾óÞxã]µ$I’$I’$I’`‡I’$I’$©¨¤’<ò¸—{Å(úӟ븎|ò™Ît–°„ì` K˜ÍlrÉma˜Y³fñðÃóµ¯}®]»Öº/''‡×_… 2lØ0N;í4zè!î¼óÎø:>ø /¿ü2#FŒ ==ñãÇS]]ÍÏþó¶ŠÔ±uí /¾))pùå°o_‹âüóÏç'?ù =öC† 
aÈ!<öØcüô§?åÜsÏm…¢%I’$I’$I’”ˆB$I’$I’¤v¨šj>äCòþök9Ë9À„‰a6³™Ä$zÑ+èR›%‰ššÊ7¾ñ„÷1‚×_½Áí'NœÈĉ[©:é$Ó§¼þ:Œ7Þ/¼IID£Ñ7©{ßUW]ÅUW]ÕÚ•J’$I’$I’$©B$I’$I’¤v¢€‚xä7ü†ì ý¸€ xœÇù<Ÿg0ƒƒ.³ÅvîÜÉc=Æ 7ÜÀ€‚.GÀðáðÒK0iüŸÿsæ]‘$I’$I’$I’ZÈ@ˆ$I’$I’­låmÞ&<~ͯÙÈFºÓ1Œáî!B„‘Œ$D(èRY(¢[·n\rÉ%<ûì³A—#©¦ñãáÙgáúëaèPh ƒ$I’$I’$I’Ú'!’$I’$IRÙÏ~ÞåÝxÕ¬&‰$F0‚k¹–&0®t ºÔ&]‚¤ÆüÓ?ÁÚµpûíG»†LštE’$I’$I’$Ij&!’$I’$IR+©¦šù0YÆ2r0a"D˜Íl&3™žô ºTI'³„ášk`ùrøÌg‚®H’$I’$I’$IÍ` D’$I’$I: (ˆ@òÈ£‚ úÓŸ Là žàR.eƒ‚.S’þ.‚§Ÿ†‚˜:V®„þýƒ®J’$I’$I’$IM0"I’$I’$‡-la)KÉ#_ñ+ )¤;ÝÃf3›F2’¡ K•¤†¥¦Â¢E0f \q,] ݺ]•$I’$I’$I’a D’$I’$Ij}ìãwü.Þd5«I&™íP§Ê IDAT³9›ë¸Ž&0®t ºTIj™>}ކBÎ?n¼.„¤¤ «’$I’$I’$IR „H’$I’$I¨¦šù0y‡w8Ä!„‰a6³¹„KèA K•¤ã÷éOÃË/ÃäÉðíoÃC]‘$I’$I’$I’` D’$I’$Iª£€‚xd KØÉN2Éd<ã™Ç<.çr0 è2%©uL˜?þ1|å+0x0|ýëAW$I’$I’$I’¤ „H’$I’$餷™Í¼Ã;ä‘ǼAEœÊ©Œf4÷r/"ä’t™’Ôvnº þò¸í68ýt¸øâ +’$I’$I’$IRB$I’$I’tÒÙÇ>~Çïâ]@V³šd’9›³¹žë‰á.  ]‚.U’‚óÐC°i|á °bœuVÐI’$I’$I’$©!’$I’$Iêôª¨b kâwx‡C"L˜f3›K¸„ôºTIj?B!˜?.º¦N…•+¡_¿ «’$I’$I’$IÒß‘$I’$IR§T@A<òk~Í.v‘EãÇ<æqWMvÐeJRû–š ‹Á˜1G;…äå]&I’$I’$I’¤À‘$I’$IR§PN9ËXFy¼ÎëSL:éœÇyÜÇ}DˆKnÐeJRÇÓ·ïÑPÈØ±pà °páÑî!’$I’$I’$I ”I’$I’$uH{ÙËJVÆ»€¬f5©¤2’‘|‰/!ÂD&rŠÿ&IÇïÿþ÷aòd¸ÿ~xðÁ +’$I’$I’$I:éùÓpI’$I’$uUT±†5ñÈR–RM5çp"ÌaãG*©A—*IÓÀ~7ß §Ÿ~´[HLq1LŸ_ýêÑß’$I’$I’$IjuB$I’$I’ÔnP€¼É›ìf7aÂDˆ0“™\ÄEô¡OÐeJÒÉãŸÿ>þøh(dà@¸è"X½.½¶lÍ› „H’$I’$I’$µ!’$I’$Ij7Ê(c9ËÉ#×xJèK_.äBá&1‰a ºLI:¹Í™EE0mÌ ³fÁ¡CGï+(€+`ìØ`k”$I’$I’$I: ‘$I’$IR`ö²—•¬ŒwÉ'Ÿ4ÒËXnã6"D8‡sH")èR%I1¡ÌŸçŸ_ÿúÑeG޽íÒžyÆ@ˆ$I’$I’$IR0"I’$I’¤6SEkX€,e)G8ÂF!Âæ0Žq¤’t©’¤†TWÃìÙðá‡õï;|~ñ xüqèÑ£ík“$I’$I’$I:‰‘$I’$IR«*  ù¿b{&B„™Ìäb.¦7½ƒ.S’Ô{÷Âôéðæ› ¯ …ÌœÙvuI’$I’$I’$„ „H’$I’$鄊@–³œ<ò(£ŒÓ8‰Läû|ŸÉLf(Cƒ.S’t,.¹Þ}·ñu¢Qøñ „H’$I’$I’$µ2!’$I’$I:.ÛØÆ[¼ïR@ÝèÆùœÏ,f!Â9œCIA—*I:^7ÞøI’$I’$I’¤“„B$I’$I’:©ÿáø_c';Íh†2´ÙÛPïò~ÃvÐ~\À<Îã\Â% aHë/IjÿÎ9V¬€W_…o~ÊË¡ºúï÷ïÙ/¿ ÿôOÁÕ(I’$I’$I’Ô‰‘$I’$IÆöíÛÙ¼y3›7of×®]ôë×ÌÌL233éÖ­[Ðåµ;ØÁ-ÜÂB"D2Éä‘ÇÍÜÜà6[ÙÊÛ¼My,a Ø@7ºq>çs÷!ÂHF"Ô†#‘$uS¦@$r´kÈ …>|´kÈ“OW dëÖ­”——SXXÀàÁƒÉÌÌä´ÓN;AÅK’$I’$I’$u\B$I’$IR öïßOYYååålÙ²…ÒÒR¶lÙRkYII [·nåàÁƒ >Ω§žÊ€èׯYYYdffÒ¯_?²³³ë-;å”Îû_"¿âWÜÈTP@”(!B,aI­@È~öó.ïÆ»€|À„1‚Lg:"Œg<)¤5IRG’–³g Üy'üò—pä,] 
6À°aµV¯¬¬¤¤¤„²²2Š‹‹)++£¨¨ˆ²²2JJJ())¡´´´ÁcJJ ÙÙÙ 0€••Å AƒÈÊÊbàÀdee1`ÀÒÒÒÚbô’$I’$I’$Iè¼³$I’$IR ***(--¥¬¬ŒÒÒR***â®»¬¦ÔÔT²³³ÉÊÊ";;›óÎ;ŒŒŒZ˲²²èß¿?»wïnpëÖ­#//²²2***ÝGVVVÂ}dff’””Ô–OÛ1ÛÍnîâ.æ3Ÿ!Žp$~_U¼É›¼Ïûü–ß’GËXÆA&L„³™Í$&Ñ‹^ŽB’Ôá BÅ3ϰkÊN»ÿ~º²|æL„õŽÙåååD£Ñøfñcð™gžÉÅ_œðX]÷\¢¬¬Œ‚‚Þÿ}-ZDQQ‡Ž?nÝc~8®õ˜ÙÙÙ 2„äää ž-I’$I’$I’¤ãb D’$I’$5[eeeƒ¡ŽšË¶lÙBuuu|»ÔÔÔZ‹p8ÌØ±cë…0H×®]›]OFFäää4Ywc”üüüøÊ:Tou'ŽÖ 0€^½‚ R,g9×s=å”ýÛ¯ºv±‹s9—l²‰áiž&B„,²¨X’Ô8p€ÒÒÒzŒšÁŒšŒdàk]º°sÝ:Š*+ÉÎÎ&‰Ô;®<¸ÙÝ»Âá0áp¸Ñuê†Rcµ•––’——×` %QX$v;|øð@õ’$I’$I’$I‰‘$I’$é$wàÀvìØÑdУ9a‰p8œpeFFF@£;*--´´4²³³ÉÍÍmtݦB/ëÖ­kV襡Î#- ½4Z+•|—ïòüI$QMuƒëv¡ ÿ¿ðüÇ Ù·$©s©¢ˆ…'…>bºvíJŸ>}ê…=kvá8p ={öló±4'0Zóü§îx X¾|9›6mbß¾}ñmu«Ûu¤%áI’$I’$I’¤ãåO%$I’$Iê¤ê^»±5Õ옛››0èп’““]ëIKKkÖÕÇ¡ñç86™´9Ïq¢àHSÏñJVò%¾D!…D‰6¨¢Šø ùO„$©SHt¬‚(,,¤ªª*¾Mì8 :äääÔ A 2¤CŸÄÆØTX4Q·‘ØíŠ+(--eóæÍ9r$¾MÍÀlC]G²²²…Bm1TI’$I’$I’Ô‰‘$I’$©iª{E충x× ! 4ˆ.]º8ÂŽ¥9W ‡ú]Xê¾fùùùTTTPXXÈÞ½{ëí£npä£k?â­1oA¢D›Uk”(ËXÆA’BÊ1Y’Ô>:tˆmÛ¶5ö())a×®]ñmRRRèÝ»wü˜’““Ô)SjgLzzz€#k_šs¬OôZÄ^ƒX@´¸¸˜Ý»wÇ·©ynÖPhdèСtïÞ½-†)I’$I’$I’:(!’$I’$¬²²²Ñî±eEEE>|8¾]×®]éÓ§O­PGì Þ5— 0€^½z8B5÷*äÐtègݺu|4õ#¢[¢ÐH­óQHªN" AI:B4t44rƒ¼Sõ“N™Ô:•$±cAÍ.uo7mÚT+üY7@˜››[/h™™IRRR€#뜺víÚ¬ã|Ýc|Í×7??ŸÅ‹7ùºÖ¼½¾¾®’$I’$I’$¼ „H’$I’Ô bWŠn*èQZZÊÎ;km›ø uäææÖêá¤ÎÎ---p8L8nrÝŠŠ 6lØÀÇÛ>fýÎõ”.£ø`1å‡ËÙÝÆÎäTv¯¤²G%d=`òW&“º"µÞÄÒºÝb²²²èß¿?ÉÉÉ­?hI:I3vìØz“ðЩ–ÈÈÈ ##ƒ‘Œlt½°cÇŽ£ïÏY¥TÜP;¨”ŸŸOEEE½Iȱ}4‰­ …Zs¸’Ô®UTT4ØÍ#6¹¿îùAÍïØØ¹A¢N:¹ddd››Û¢n#5ßkk×®%//ÂÂBªªªâÛÄÂEuÃ"5o‡ bT’$I’$I’¤Ä@ˆ$I’$é¤WYYÙd'²²2ŠŠŠ8|øp|»ØÕ˜kNÏÉÉ©7i~àÀôìÙ3Àêd›»²|cjN0Mô¹(((`ñâÅ '™Ö|ß7É´¡ÏYCMkN”n(8âdiIm¡±Éò5ocºvíJŸ>}êuüªö0ªö$ÖM,''§Áujv«û9(((`ùòålÚ´‰}ûöÅ·©zªÙéÆÐ“$I’$I’$ImÇÿ‰—$I’$u(MM@ýyóæÍ9r$¾]Ý è5'p:]:6±I¦M‰´ê<²víZ–/_Nqq1»wï®·†‚#5?¿YYY„B¡Öª¤¦f·£ºÝÈÏÏgñâÅõ&rǾšê<â÷Ô±:tˆmÛ¶%œ»]¿~=;wîŒoS÷û!''‡)S¦Ôú~ï5oý¼K’$I’$I’NVþD[’$I’Ô"MMÀŽý¹¼¼œh4ß®îìp8ÌØ±c€-é˜Å®TÞ”¦j±+–—””°k×®zûh*8b@Mj\cbËŠŠŠØ³gO|›ºrssëMþ4h]ºt pd’ŽUs ±ãt¢ØºuëšìT3,bG I’$I’$IRgä I’$IRü ½M= ©ªªŠo—ššZkBtFF999õ&L;éªó™4iK–, º ©Yj^­<''§Ñu+++~ÆncW-/**âðáõöÑ»wï&ƒ#¤gÏž­=d©ÍÄ‚¢‰>3±IÜu»ôÔ¼ÊÝ€¨!+I5edd››Û¬n#‰¾ƒÖ®]K^^^ÂÇ$ ‹Ô¼2dÉÉÉm1LI’$I’$I’Ž™I’$Iê¤8ÀŽ;š 
z³{÷îZÛÖœ¨™••E8N8Á¹-&kÖ}üÔÔTúôéÃ9çœÃ¿øE®¿þzºvíÚª5œh±1Õì ÒÒíuÛcÙÔ¯µæÄÞýØíí1[K[¾Žu÷ ­ÿµöø~ûÛß2oÞ<–.]Juu5gœqßúÖ·¸ñÆë{)--´´´xG‚Æ4Õ1iݺu 'Ã×í˜ÔP€Äî Ò(--m0ìQZZʦM›Ø·o_|›º]=bAš®[»XG?oh軳=ß:Ëq¤¡}ÂÉõ<·õ¾[ãøÝ’n# u)гkv9ìÒ¥ }ûöM‰Ý>œ^½zSÝ’$I’$I’$B$I’$©ƒ©{%î†&"לÌõ' 'êäÑ5[*ÖšXYYIyy9¿ÿýï™?>ÿùŸÿÉ/ùKÎ8㌀+m¾Ø˜:‚†jýÍo~ÓjÝÞ³³é,ÏÑÅ_Ìg?ûYÞ|óMÎ:ë,>ùän½õVŠŠŠøö·¿Ý&5ddd‘‘Ñäz‡bÛ¶m ~_°|ùrJKKÙ¹sg½}4ÉÊÊ"33“¤¤¤Öª:™Æ&EÇnkžGtíÚ•>}úÔ ŠÖíêÑ^&EwÆóh_ßÝí©–­=-ÈZZsßA¿cÇíÆº…5†‹¯› ÃÕì€ÔVa8I’$I’$IÒÉ+m—º’¤$Dˆ…,d:Óƒ.E@(báÂ…LŸîûI’jzê©§˜9sfÐe´©ÊÊÊ&eeeRUUß.55µÖ„à†& <˜ôôôGx|»BôSO=ÅÃ?̇~HÏž=Û¸²cwúè#JJJ¨¬¬¤´´”U«VÅ÷1gÎf͚ŴiÓâûèÈa¦Ìœ9“É“'óýï¿Öò>úˆË.»ŒôôtÒÓÓ¹ì²Ëøè£êmÿÑGqùå—Ç×ûüç?_o½]»vqÇw‡IMM%++‹›nº‰ßÿþ÷MÖ·víZ.»ì2N=õTzöìÉÕW_MaaaÂu-ZÄØ±cIMMeèСÜqÇìÙ³§Ö:± b¡P(þûæ›onÑco­±ý&Z¶~ýz¾ð…/‘‘‘p½D ¹úê«éÙ³'§žz*—_~9úÓŸê­×œ×ª!Íy?Äê-**âÊ+¯$==þýûóå/™íÛ·×{ÌšÏW=¸ä’KX·n]³ÆÝÔëØÜ÷o"kÖ¬aòäÉtïÞ=zp饗òæ›o6YO¢š-oÎç¡9ãkêµlê=FkM&èÓ§lƳÔ~¥¥¥‘Mnn.S¦LaæÌ™<ðÀ<þøã¼ð ,_¾œõë×sèÐ!öïßÏúõëY¶l ,`öìÙL›6p8LEEyyyÌ;—¯|å+Lš4‰³Î:‹^½zÅ÷1jÔ(¦L™Â׿þõø>^|ñE–/_NAAA­À‰‚WQQ?§xê©§xàøú׿Δ)S5jµÎ'f̘ÁܹsÉËË£²²’œœœzç»w零²’õë׳dÉ’„çáp¸C…AšÒžÏšúîš}Œ:žcæÉriLsŸçæœwÏyd"Í}m›{NØ’sUßë4‰D¸á†˜={vüX½jÕ*JKKÙ¿?%%%¬ZµŠE‹1gΦL™BVV¼øâ‹üë¿þ+S§NeÔ¨Q 0 þ=>nÜ8¦OŸÎ¬Y³˜;w.Ï?ÿ÷ÜsÑÇ{,:{öìèŒ3¢‘H$zæ™gF³²²¢¡P( ħ¦¦FÃáptìØ±ÑiÓ¦Eo¿ýöèý÷ß}òÉ'£‹-Š®Zµ*ZRR­ªª zˆíNSÿ|}ÿý÷£ŸùÌgâÿä“O¢ŒÎŸ??ºyóæèæÍ›£O?ýttÀ€ÑO>ù¤Öz™™™ÑýèGÑ-[¶D·mÛ]°`A4G‹ŠŠâë]yå•Ñx Z^^=pà@tõêÕѱcÇ6Y×_ÿú×è Aƒâulݺ5º`Á‚è˜1cn DŸ|òÉè¾}û¢eeeÑn¸!zã7¶èùhîcœˆZ-›4iRtÅŠÑýû÷G_ýõ&Ÿ# :f̘è‚ ¢[·n¿VƒŽnذ!¾^s_«Dµ5÷ýÛvòäÉÑ7Þx#ºgÏžhaaaôꫯŽÞtÓM>_Û¶m‹þüç?žuÖYM޹¡:¥Þº>þøãè§>õ©è’%K¢ûöí‹®]»6:~üøzûjîkšhys?¯%¯eKÞSwÞygôòË/oðþ“ÕÁƒ£%%%µŽ]sæÌ‰Þ~ûíµŽ]½zõªuÜjɱ«ºº:èavXû÷ï®_¿¾Þk3mÚ´èØ±c£áp8šœœ\ëuÉÈȈžyæ™ÑH$1cFtöìÙÑÇ{,ú /øšD;îyCSµ':FM›6­Þ1ª%ß³uìÇ‘Ø6Íyžcë6uÞu"ß-{SµµäüÏãwë9Ž«V­Š®Zµ*è2$IM˜=ïgçE#‘HÂû¿úÕ¯F¿÷½ïE£ÑhôÏþs´o߾џýìgÑ]»vEß{ï½è°aâo½õV|ý«¯¾:zÇwD7nÜ=tèPtûöíÑW_}5:yòä¶Ž$I’$INCóMC»S’ÔL!B,da¼m²t<já%I'»§žzŠ™3g¶é>+++)++£´´4~[QQQoÙ–-[¨®®Žo—ššJFFÙÙÙdeeÅoë.8p ]»vmÓ1u&¡PˆÆþùºoß>ú÷ïÏÞ½{øò—¿Ìç>÷9fÍšUk½G}”>ø€ 
Ä×ûìg?Ë=÷ÜSk½Ÿüä'¬Y³†Ç€ôôt6mÚDïÞ½ãëlذp8Üh]3fÌ`Ô¨QõêxöÙgùÊW¾Òè¶pôjð§Ÿ~z½+R7õ|4ç1Ž·ÖD5„B!Þzë-&NœØ¬ÚbÛüô§?妛nªµüÑGåøÏ>û,Ðü×*QmÍ}?Ķ}ùå—¹òÊ+ãË>þøc.ºè"JJJâËz¾~ñ‹_pÝu×5ëõièulI½u]ýõ\zé¥Ì˜1£VýŸþô§kí«¡×/Q=u—7÷óÐØøZòZ6õž*,,déҥ̛7 6°råJ†Þàúj\eeeÂã_ÝeÅÅÅ:t(¾]×®]éÓ§O“ÇÄЫW¯GØv<ÈöíÛk=oµž×ââbvïÞß&55µÞóW÷vèСtïÞ=À‘µõ¼¡©Ú£6lØÀ¸qãj£Zò=[×ÉxITosžçDwÈ÷Ãñ¼¶‰jkÉùŸÇï`:tˆmÛ¶Õ;>×¼]¿~=;wîŒo“’’BïÞ½kGÂáp­cÊàÁƒÛ¤›d~~>¹¹¹­¾/IÒ±›ÎtŽ>ÂÊa+yóÍ7ÉÉɉ߷}ûvN?ýt>þøcúõëÇ 7ÜÀÙgŸÍ]wÝ_ç•W^á©§žŠwIOO§¤¤„=z´ùX$I’$Iꌚoj D’ZÈ@ˆN$!’”؉ „8p€;v4ðH4© ##£Ñ€GÍej}MMìÜ»w/™™™ñ‰™™™¼÷Þ{ 2¤Öz6làüóϧ¬¬¬Ñõ6oÞÌ…^Ⱥuë˜ó™ÏP\\ ÀyçÇÙgŸÍý÷ßÏ€Úp$’$I’$uNB$é1¢É@ˆ$%ÖÜ@È'Ÿ|²eË())aëÖ­ñÉ—›7o¦´´”}ûöÕZ¿_¿~ôë×ÌÌL233éß¿?ÙÙÙôë׬¬,233éׯ§ËÀYç IDATvZk MǨ©‰¿ÿýïùÚ׾ƚ5k8å”SØ·o)))µÖ;pàééé>|€.]ºÔš USZZû÷ï`×®]|÷»ßå•W^aëÖÿŸ½;¯ª¾ó?þÊN[d#!˜"Ú¢€Š Xp—ª¸ÔV¨Óv¬¶V¦¶Õ©c+­mÕßXm©Kíf Ój‹¸TCÝB%‚´n ²a ²Ýßpo³’°ådy=yäq’“sn>ß»Cò}ŸÏfÆŒÃôéÓùÚ×¾F\\\«uµVGKcÚ¼y3ßýîwyþùç)--m4yº½.æ6§ÖƒY×–Öö9ÔǪ¥Ûlïóá@õ´÷6tíÝî`êmiß?þ¸ÍIô‡3‘·½¯‡ÖnïpËöŽEKMM ååå”””PZZJyyyäØ^×Þcwjj*C† !==Ï~ö³G­SÆÞ½{yꩧذa›6mbÓ¦M‘Ž¥¥¥‚¤ýúõ#++‹ÌÌÌH%##ƒììl222ÈÊÊ"--í°&ÒêàtÕó†¶joïûôÁ¼Ï6Õ#í[KëÛ{Þu$Ÿí{{k;˜óßÝG]]eeelܸ‘’’6lعPBqqq‹¬âããIOOï† Â!CÈÎÎæâ‹/nñ9ÔIêÂû|p˃wÜq¼óÎ; 4ˆºº:rssù¿ÿû?ÆìëDÕô;ÑÑÑ‘sõë×sã7òüóÏ3lØ0&NœÈE]Ä…^h^’$I’¤CÐÚ|Óö_ŠJ’$I’:™»ï¾›_üâ­~àÀŒ3†“N:‰N8œœœÈ¤Ò”””¬TGÛܹs¹è¢‹"_4ˆ’’† Öh»’’ Ôh»µk×6ºtKú÷ïÏ=÷ÜÃ=÷ÜÖ-[X´h÷Ýw¯½öüã[ÝoРA”––¶ØE¢©3fpÜqÇQPP@VVVäêÑóòùƒ©õH«¨¨höš,))iÎjïcÕ’ö>ö6[º¿JKKéöšÞö¡Ö;pà@¶lÙrHÝ‹¢¢¢Ø»wo£ }Ͷ;Ô×CØá<–êšâââ"G›ª­­¥¼¼œ²²2JJJX¿~=………¬ZµŠÕ«WS^^Nyy9«W¯n´_LL Ï>û,guÖQ©yåʕ̘1£ÕÉÏŸüä'9餓8å”SøÄ'>™ ëùE×ÐYÏŽ”ÃyŸõ8Ò~í=ï:’χö޽½µÌù_g¹ßuøbbb"!Ʀ***">ø€%K–°bÅ þñPTTDQQQ£íccc)((`„ U¾$© 4ˆK.¹„¹sçòßÿýßüå/!===}ÁøU«V‘‘‘ÑêísÌ1<õÔSìÝ»—þóŸ,[¶ŒÿøÇ,X°€G}´#†"I’$IR` D’$IR—5wî\î»ï>¶mÛFII ÅÅÅ‘eÃuO>ù$sæÌitUý„„RRRHNN&33“ŒŒŒÈ²áº!C†0`À€G©¶üïÿþ/‹-Š\å`êÔ©<ùä“Üxã¶}ê©§˜:ujäë³Ï>›—_~™‹/¾¸Ñv¯½ö³fÍâÍ7ßöMžÛ°aYYY 4ˆ+®¸‚iÓ¦5›8ÚÔYgÅŸÿügfÍšÕhý¢E‹šm»dÉþð‡?4z¾µvïÖs‡Së‘–ŸŸßì 
O=õT£‰Þí}¬ZÒÞçÃÁhíþúë_ÿÚîÛhíq<œz'OžL~~>3gÎŒ¬{ë­·¸òÊ+Y»ví÷MOO§¨¨ˆc=6²îÕW_m±îö¼Zßá<–-ñêâÓž={Z<&7]WTTÔ(tÑ«W¯ÈqxÚ´i­§‡Jß¾}Zý&L`÷îÝlÙ²¥Yí%%%¬[·Ž•+WòÌ3ϰcÇŽÈ~áó‹ÌÌLrssÕ^æääЧOŸ£V»¬3Ÿ7„÷=\‡ó>Û#‡ª½ç]‡ó|hª½coomsþçñ»k«®®>à1­¥® i—]v™Ç4IêafÍšÅ9çœÃM7ÝÄ<À7¾ñFߟ2e ùË_¸öÚkÛ¼­„„FÍèÑ£™>}:999B$I’$I:‚ „H’$IêÒILL$33“¼¼¼nÛprjK“T×®]Kqq1eeeÔ××Gök89µµàHff&ÙÙÙÄÅÅí!÷xUUU”––²téRyäJKKyñÅéß¿d›Ûn»)S¦Ð¯_?.¼ðB¢¢¢X°`÷Üs/¿ürd»Ù³gsá…RWWÇ”)Sˆç•W^áÚk¯åç?ÿy£Ÿû•¯|…Ÿþô§ >œíÛ·3gΜ6'çÏž=›É“'Ó·o_.¸àbbbÈÏÏç'?ùI³mO9ånºé&n¹å233)**âöÛooñv‡ Â믿θqãx饗øò—¿Ì† ê6§Ö#-ü3>ó™ÏP__Ï‚ ˜3g¯¼òJ£úÚûX5ÕÞçÃÁhzEGGó /ðð÷û6Z{§Þ[o½•‹/¾˜ÌÌL>ýéOóÁð¥/}©ÙÄ–L›6[n¹…{ï½—°xñbzè¡·mÏ롵ñÎcÙÔ¤I“ˆŠŠ¢  à öÓ¡©ªª¢¢¢¢Í Ç† صkW£}“““Csss[=®vñññ‘+¨è£éùExbmII ………,\¸°ÅàËB#™™™äääÓCíÖºÒy´þÞy0ç}¶§GÇÁœwêó¡©ö޽½µÌùŸÇïÎkÛ¶mÍBMIMÿÛô¸œ——×옔‘‘qDBj’¤®é„Nàøãç¶ÛnãÿøG³ yÜvÛmL›6ÄÄDÎ?ÿ|zõêÅÒ¥K¹çž{xæ™g8ýôÓ¹æšk˜õ©Ð€B@£^½z…rssC“&M ]vÙe¡n¸!tÛm·…æÎZ°`Ahùòå¡M›6…êêêcWÒô>NHHeff†Î;ï¼Ðc=ªªªjq¿U«V…Î9çœPRRR()))tÎ9ç„V­ZÕl»wß}74}úôPÿþýCIII¡“O>9ôä“O6Ú&???tÑE…ŠsÌ1¡o~ó›¡;w¶YÿêÕ«Cçž{n()))Ô§OŸÐYgZ³fMd>>4|øðÐ_þò—ƒ¾Ã©µáãÑÒºöþº!¼íš5kBguV¨OŸ>¡¤¤¤Ð¹çžZ»vm³íÛóXµVC{ž­íÛÚú†÷Wß¾}CçŸ~èý÷ßEGG·kü­=Ží­·5K–, Mš4)Ô«W¯PvvvèŽ;îh×x6oÞúüç?Îc{0ç„í=ÿkïÏöø}äTUU…6mÚZ¾|yhþüù-ŸûôéÓæ±ùÎ;ï ýêW¿ ½øâ‹¡>ø T]]ÝácY¾|yhùòåþs%Iç²ýÿÂ,XB·Þzk‹Û¯X±"tî¹ç†úöíJLL M™2%”ŸŸùþK/½š>}zhàÀ¡^½z…†ºùæ›ýŒ$I’$I‡¨µù¦Qû¿)Ij§(¢˜Ç<.çò¶7–Úżyóš]YI’zº_üâ\sÍ5A—Ñ.{öìiõ é ×mܸ‘êêêÈ~ñññ 8°ÍÎ#C† aÀ€ŽPÒÁX³f çw~øaÐ¥¨‡k«+VxY^^N]]]d¿övÅÊÊÊ">>>ÀvOUUUðJï6l ¦¦&²O¸ÛHø±i©ëÈСC‰µY´$u-uõhØaª¸¸˜ÒÒRþ /99ù€¥rssINNpT­+,,h³«§$)Xá¿}Îg>°ïÿ•)))¼ÿþû 2$ÈÒ$I’$I­Ï7õ¯€’$I’tILL$33³ÍÉ-mMÎ]»v­“s¥.$**Šûî»™3gÒ»woþñpýõ×óõ¯=èÒÔMUUUQQQqÀ€GK!DØ7‰´á1$77·Õ㊂ӫW/rssÉÍÍ=àvšHœŸŸH‰ÃKIÒ¡kéÿ|ì›:uªÁ>IRà¶oßù‡aI’$I’:7{,I’$I$11±]“<¡ùDφ“~×­[GAAAd]CM'µÉÈÈ --˜˜˜£5T©GX¸p!÷Þ{/·Þz+ÑÑÑ{ì±|ãßà‹_übÐ¥©‹9Ð{~Óu 5}ÏÏËËó=¿‡HNN&99™‘#G¶ºÍÞ½{Ùºuk‹WŸKQYYÙ§¥óˆ¦WŸÏÎÎ&..®#†)IʶmÛš½Ÿ6 {lÛ¶-²}¸+dÃ0æ¤I“usÊÎΦ_¿~ŽJ’¤æ¢¢¢èÝ»7gŸ}6?þxÐåH’$I’¤6‘$I’¤N¨==¡í«Å¶ûjñêB"©¹óÎ;óÎ;/è2ÔIµÕ*¼l«+TxòhÓ÷h'å«- dff¶ÙŬ¥n# Ï#.\ȇ~H}}}dŸ¦ç '7‡×eddÕC•¤ÃÒô˜ÝR裨¨ˆÚÚÚÈ>MßGŽÙì}qذaDGG82I’MÃNƒ’$I’$©ó3"I’$I]XøJÞmMö„¶''¯]»¶]““[ dee´‡,IÙ³gÏ»w„×5 
á…¯Þð}3}útÄ0%õ@m½w·ùÞ•——ç{—$I’$I’$©S3"I’$I=Dbb"¹¹¹äææ¶¹mÓ«…7œì¼nÝ: ضm¥¥¥®¨´Õy$--˜˜˜£9\Ij—ðdѶ‚ÅÅÅlß¾½Ñ¾MßóòòòZ|ÏKOO÷ áêÒâããÛ@=ÐUöÃÝFÚºÊ~KK¯²/©©u7 /Ûên”——gw#I’$I’$IR—g D’$I’ÔL{®PUUEEEE«WÉ/,,dÛ¶mlذ]»v5ûmGÂÛ8)KÒÁj«+RxYVVÖh²hÓ®H¹¹¹Lš4©Ù{Tvv6qqqŽPê|Ú>ݶm[£°HÃe¸cYIIIdûp—–Â"áÉÜÙÙÙôë×ïhQÒQ¶wï^¶nÝÚ,TÖpYTTDeeedŸ–šMß'>¾]ÝFÔ}!Üm¤­î -u1`׳…ŸW-uó/?úè#êêê"û4}^åååÙÅF’$I’$I’¤nÄ¿ÚK’$I’tˆ"“Bt5qh<1´¥ ú­M _±¹¥àHà úÙÙÙôë×ïhYkëy^×Úó¨ásfäÈ‘>$é(iog²mÛ¶µ:¹íÚµ¬[·ŽmÛ¶E¶gàÀdffÒo\?²ûdsLŸc…F|/ïzÂ!ã…=ŠŠŠ¨¬¬ŒìÓ0À(5 eggàÈ$I’$I’$IÒÑf D’$I’¤ÐÞ‰¡Ðvg‡µk×¶«³CkGììй4œz Ç½=frss[|ܽò·$uNÉÉÉäååµ»ÛȺÍëx¾ïó¼6ü5 ‡2â¡üO6l ¦¦&²OÓ°@ÓŽ™™™äääÓÃìÑÂçu­uŒ)..¦´´”P(Ù'Ü¡+##£Y§®†KI’$I’$I’$gH’$I’ÔÉ$''“œœÜævá Ak‚uëÖQPPÀ¦M›Ø±cG³ŸÑVp$##ƒôôt¢££ÖP»µ¶‚=­Mmìi©“‡ÁIê9Ù–»ßäþ†'x‚j˜Æ4îá.¾öbb¯Ýw,8Pð ??¿ÍàAKƒÜÜÜv“ôDUUU·ØÍ#|ß·Ô™:uj³ûÜã»$I’$I’$I:þUA’$I’¤.*!!ÌÌL2339rä·Ý³gσ ………,\¸°ÙÄÅ„„RRRÚ ŽdeeÑ¿ÿ£=äÀ5¼JûîÏ¢¢"jkk#ûõêÕ«ÑýÕZÐcèСôíÛ7ÀJ’:‹Ílæ—ü’‡y˜÷yŸ±Œåîàó|žRšm”蜠aWª¦]*ÂAÒ¢¢"*++#û4 1´Ôu$;;›¸¸¸£r?¡a¸¦éýÔpÏÀ#÷Oîáû©§œ+I’$I’$I’¤Že D’$I’¤ 11‘ÄÄD233ÉËË;à¶ C-Ö®]Kqq1eeeÔ××Gökzh)8Ò'VUUQQQqÀ Gј"*þ^Áî7v7Ú7Üi¥áЖƟ‘‘ATTT@#”$u%¯ò*s™ËŸø‰$2ƒ|…¯0ŠQ‡}Û Ã¤:h©ÛHx¹xñâÏš›†F222?6=Çi)ìÑR¨³áXš:333ÉÉÉ!&&&°qI’$I’$I’¤žË@ˆ$I’$Ij$11‘ÜÜ\rss¸]uu5[¶liµSFøJãÅÅÅlß¾½Ñ¾­]m¼i€$==èèèCGÓɬ­ÕYZZJ(jµ¶¼¼h)4²nÝ: Ù¸q#;wîŒìÓ0<Ú4,^æääЧOŸƒª·­ZŠ‹‹Ù´i;vìˆìî‚®%//¯Y-vî’$I’$I’$II’$I’tHâãã#W?ЄQ€ÊÊÊÈÕÄKKK)--¥¼¼œM›6Q^^ÎâÅ‹)))aóæÍÔÔÔDöKHH`ðàÁ 2„ÔÔTÒÓÓÉÈÈ 55•þýû³yóæÈí–••QRRByy9åååB½{÷ŽLÒÒÒ8þøã9ýôÓ#ÁŽ´´4233IMM%>>¾Yý?å§üˆqû˜Û© ‚y ƒH’Ï{¼ÇÏøò(uÔqð 2‰IA—Ö¦†çê6RQQAqq17n¤¤¤„ 6PRR¦M›((( ¤¤¤Y83--ŒŒ ²²²"?#++ €7R\\Üè6ËÊÊ"ûFEEEΆ ÂðáÃ9í´ÓÈÎÎnt›)))GïΑ$I’$I’$Iê B$I’$IÒQ×§OFŒÁˆ#ÚÜ6èO-//§¸¸˜òòrÖ­[Ç’%KèU\Ìø;Yž™ô™••ÅøñãIMM„?RSSÉÌÌ<è+7K,³™Í©œÊU\Å8Æ1ŸùŒbÔaÝ®$©ç©§žçyžû¹Ÿxá çîà‹|‘~ô º¼#.%%…””N8á„V·©©©¡¬¬,iúxï½÷xõÕWþᇼ?lXä¸?räÈHX$##ƒììlÒÒÒˆ‹‹ë ÑI’$I’$I’$Ë@ˆ$I’$IêTRSSIMM=àÄQæÏ‡+®àÁ;®0`*SYÎr®äJ&0;¹“YÌêÐ$I]S%•<ÁÌakYË$&1y\Â%=¾ëT\\YYY‘. -ºüò}Ëùó;¦(I’$I’$I’¤. 
:è$I’$I’º’,²x‰—¸™›¹‘¹”KÙÁŽ Ë’$uR¥”r ·M6ß䛜ʩ¬f5p—õø0ˆ$I’$I’$I’I’$I’¤ƒK,³™Í ¼Àb3žñ¼Å[A—%IêDÞç}®åZŽáã1¾Í·ÙÀæ2—‘Œ º¯ó:ãÏJV]–$©¼ÌËLa gp5Ôð/PH!Wp1Ä]ž$I’$I’$I’º1!’$I’$IGȦ°’•Ã1Ld"s˜tI’¤£¤€¦2•)L¡†^äE^æe¦1-èÒ$I’$I’$I’ÔC‘$I’$I:‚RIå9žãü€oñ-.á¶³=è²$IGÈ"q:§s§ð*¯FÂ!’$I’$I’$IRG2"I’$I’t„EÍÍÜL>ù,e)cÃR–]–$é0¼ÁLÝÿ¯žz±ˆ|ò#ÁI’$I’$I’$©£‘$I’$I:J&3™·x‹ã9žÉLfs º,IÒAx—w¹œË™Àö°'ÒäLÎ º4I’$I’$I’$õpB$I’$I’Ž¢Á æ9žãü€oñ-.æb¶±-è²$ImØÌfþ‹ÿâDNämÞfó( ÀŽ ’$I’$I’$Iê4 „H’$I’$eQDq37³ˆE¼ÁŒa ¯ózÐeI’Z°“ÜÂ- c¿ç÷<ÄC¬f5—qQD]ž$I’$I’$I’a D’$I’$©ƒœÁ¬d%ŸâSœÎéÜÅ]„]–$ ¨§žGy”ã8޹Ìå‡üwx‡ç߉!&èò$I’$I’$I’¤f „H’$I’$u Á æYžånîæ{|‹¸ˆ *‚.K’z´¿ów&1‰k¹– ¹òO¾Å·èE¯ K“$I’$I’$I’Ze D’$I’$©ƒEÅ,f‘O>…2†1,aIÐeIR³‰MÌd&™Hozó&o2—¹ fpÐ¥I’$I’$I’$Im2"I’$I’Ó9•¬äDNä Î`6³©§>è²$©ÛÛÃf3›c9–¥,å/ü…E,âDN º4I’$I’$I’$©Ý „H’$I’$hƒXÈBîæn~ÂO¸ˆ‹ØÊÖ Ë’¤nëE^d£¸—{ù!?d5«¹€ ‚.K’$I’$I’$I:hB$I’$I’E³˜E¬f5'q‹YtY’Ô­TPÁWù*gs6Çs<üðÃÜ{ï½Ì›7/²ÍÒ¥K¹ú꫹á†øðÃY¿~=×_=3fÌà7Þ°úÃ×~ü?ð8s?÷3•©StY’tÄma pßæÛ|—ïòWþJ:éA—¥ôôc¿$I’$I’$IR˜I’$I’Ô¥¥¥¥ñãÿ˜áÇ“””ÄgœÁ<À½÷ÞÙæ®»îâöÛoçóŸÿ<ƒ&55•«®ºŠþð‡ÜyçVäÌd&°‘Œa å¯A—$IGÌë¼ÎhF³†5PÀlfí¯·{,ý’$I’$I’$Iûø3I’$I’Ô¥]}õÕÍÖ}úÓŸæÝwß|ýæ›orÁ4Ûî /¤°°ðh–סÆ2–7y“©Lå\Îe³¨¥6è²$é°<Ê£La y䱂L`BÐ%)`û%I’$I’$I’ö1"I’$I’º´aÆ5[—œœÌ¶mÛ"_—••‘ššÚl»´´4JKKfy®/}y‚'xœÇy„G˜ÊTŠ)º,I:hµÔò_üÿÁðŸü'æÏ `@Ðe©ðØ/I’$I’$I’´I’$I’Ô¥EG·ýë´´4ÊËË›­///'==ýh”¸™ÌäïüÍlf cxŽç‚.I’Úm [8›³ù9?g>ó¹“;‰ö×ÙÚÏc¿$I’$I’$IÒ>þM’$I’$u{cÇŽåé§Ÿn¶~Á‚Œ;6€Š:ÆHF²”¥Lcçq³˜E 5A—%I´’•œÌÉ|ÄG,a —riÐ%© ê©Ç~I’$I’$I’Ô³Ä]€$I’$IÒÑöï|‡óÏ?Ÿ0mÚ4^|ñE¾ÿýïóÜsÝ»sF_úò;~ÇÙœÍu\ÇJVòO0„!A—&IÍü‘?2“™œÊ©ü?BJÐ%©‹êÉÇ~I’$I’$I’ÔsØ!D’$I’$u{§œr =ö÷Ýw999äääpß}÷ñË_þ’ñãÇ]^‡˜ÉLÞà *¨` cx–gƒ.I’y€¸‚+ø _á9ž3 ¢Ãâ±_’$I’$I’$õv‘$I’$I]V(j÷÷.ºè".ºè¢£]R§ö)>Åë¼Îu\ÇùœÏ7øws7qÄ]š¤,Dˆð~Èù>ßg6³ƒ.I˜Ç~I’$I’$I’¤±Cˆ$I’$IRÒ‡>ü†ßð8ó(r*§²žõA—%©‡ª¥–ÿà?ø1?æa6 "I’$I’$I’$!’$I’$I=ÐLfòo°‡=œÌÉ<Ã3A—$©‡©¤’ ¹yÌc ø2_º$I’$I’$I’$©K1"I’$I’ÔC}’O²ŒeLg:p³˜E5ÕA—%©(£Œ38ƒ¬à^á\Î º$I’$I’$I’$©Ë1"I’$I’Ôƒ%’È\æò8ó(r*§²žõA—%©+¡„)La»XÂÆ26è’$I’$I’$I’¤.É@ˆ$I’$I’˜ÉL–³œ½ìå$Nâü1è’$uC¥”2•©ÔQÇ˼Ì1tI’$I’$I’$IR—e D’$I’$IÏñ,c_ä‹\ÎåÌbÕT]–¤n¢”RÎäLê¨ã%^"“Ì K’$I’$I’$I’º4!’$I’$IŠèE/æ0‡_ókã1&1‰u¬ º,I]Ü6p§"dD’$I’$I’$I:B 
„H’$I’$©™«¸Šå,§†Nâ$æ3?è’$uQE1…)ÄËßød]’$I’$I’$I’Ô-‘$I’$IR‹Žã8–²”«¹š+¸‚¯òUö²7è²$u!›ØÄd&“D¯òªaI’$I’$I’$é2"I’$I’¤Võ¢s˜Ãù#ó™Ï)œÂ|tY’º€ílçÎ!‘D±ˆÁ º$I’$I’$I’$©[1"I’$I’¤6Mg:ËXF=õŒe,ó˜tI’:±*ª¸ © ‚gy–A º$I’$I’$I’$©Û1"I’$I’¤vÁ^çu®æj>Çç˜ÉLö°'è²$u2õÔ3ƒ¬bÏò,9ä]’$I’$I’$I’Ô-‘$I’$IR»õ¢s˜Ã“<ÉÓ<Í$&ñ>ï]–¤Nä›|“…,äižf4£ƒ.G’$I’$I’$Iê¶bƒ.@’$I’$©MwÝëÖýëëðç_ýjãí.½¦M븺z°‹¹˜±Œå ®`,c™Ë\®äÊ Ë’°òC~ÆÏ˜Ï|Nã´ ËQWUZ ?ù ìÝû¯u……û– ý pË-žÞ±õI’$I’$I’$uB$I’$IRçwß}P^11ÿZ¿ü忾®©¤$!(‡^ánâ&>ÏçyŽçxˆ‡èMï K“€_ñ+f3›Ÿós¦3=èrÔ•­_<°ï¸ݤÑyøØ__uupå•B$I’$I’$IRÝö&’$I’$I»ê*ˆÝúhíàsŸ ¶Î(æ0‡§xŠ…,ädNf k‚.KR{ƒ7¸–kùßá:® ºuu'Bfæ¾ÀGkÇýº:ÈÈØ·­$I’$I’$IRe D’$I’$u~W^ ÕÕÞ&;N>¹cêQ3q+XA?úñi>Í<tI’:ÈV¶r9—s*§ò~t9ꢢ`ÆŒ}ÝÀZ_üâ¾m%I’$I’$I’z(!’$I’$©ó;†oýûqqpõÕN X9¼Æk|¯qW1“™|ÌÇA—%é(ª¥–K¹€ßó{bˆ ¸"uW^ù¯`-©©Ù·$I’$I’$IRf D’$I’$u W]Õú•ÂkjàŠ+:¶µ(–XîäNžâ)žáÆ1ŽÕ¬º,IGÉ·ù6ËXÆŸøƒt9êNF†ãŽkýûÇèQW$I’$I’$IR'd D’$I’$u _øBëW ÿÔ§`äÈŽ­GôY>Ë VL2ãÏ#<tI’ްßñ;îç~ã1Æ26èrÔ͘Ñr4.þýß;¾I’$I’$I’¤NÆ@ˆ$I’$IêÂWŠj¼>.¾øÅ`jÒ e(¯ð 7q_å«Ìd&»ÙtY’Ž€•¬ä?ønäF>Çç‚.GÝÕ•WBmmóõ55pùå_$I’$I’$IR'c D’$I’$u3gBLLãuµµN íÄb‰e6³ù3æžaãx›·ƒ.KÒaØÃ¾ÀÏxîäΠËQw–› cÇ6ƒFEÁ¸qû‚¢’$I’$I’$I=œI’$I’Ôu\y%Ô×ÿëëèh˜0† ¬$µÏ\ÀJV2L`s˜tI’Ñwù.›ØÄ¯ø±Ä]Žº»¦aИ˜}ë$I’$I’$I’d D’$I’$u!™™pÊ)û‚ °oé¤Ð.#›l^æenâ&näFf2“Ýìº,Ia‹x€ø?#‡œ ËQOð¹Ï5ƒÖ×Ãe—W$I’$I’$IR'b D’$I’$u-3füëóP¦O®´Xb™ÍlþÊ_yÇ8V±*è²$µÃvð%¾Ägù,WqUÐ娧HM…ÓOß×$&Î8ÒÓƒ®J’$I’$I’$©S0"I’$I’º–éÓ÷u‰Š‚Ï|fßDQu9S™Êr–3ˆAL`s˜tI’Úðu¾NU<ÄCA—¢ž¦aô*ÃH’$I’$I’$IaB$I’$IR×2p œuÖ¾î 'ˆªËÉ"‹—x‰›¹™¹‘Ì ’Ê Ë’Ô‚§xŠ'x‚ÇxŒT ⩃]zé¾î ÑÑpÉ%AW#I’$I’$I’ÔiÄ]€$I’$IêÞjjjزe [·neË–-”——³e˶lÙ®]»Ø¹s'»víâãÊJvïÜɶ­[ùøãÙ»w/{««ùxÏžF··}÷n.…x:cÕ_ú½þµATúö%**о}ûÒ;)‰¤>}0x0IIIôîÝ›~ýú‘œœÌàÁƒ4hP£”””޽ƒz¸Xb™ÍlNã4®â*Æ1ŽyÌc4£ƒ.MÒ~[ØÂ5\ÃWù*çq^Ð娃Õ×׳cÇvíÚEee%•••ìܹ“;v°{÷n*++ÙµkÛ·o' ±mÛ6víÚEmm-{ö졪ªŠšš*++#·¶{÷nª««[üÙá}æï_wyr2½zõ"11±ÅýèÝ»wäëþýûMŸ>}ˆ‹‹‹ìKßýç  ˆ,ûöíKŸ>}èÓ§ýúõ£_¿~‘¯ûöíÙV’$I’$I’$)hB$I’$IÒ!«­­¥¨¨ˆ 6PTTù|ãGñÑP\ZJÅ®]ö‰ÅÅ1(:š¾@¿úzúÖÖÒ'" ôz1@¿&?³ü xø¸¦†½55‘ï×;öÿÌÀÇû?¶[bbø8:šÑÑT›kk©¬«ktû±11 0€¡C‡’›KVv6Æ #++‹ìýŸ§¦zuü#í3|†å,çJ®d"¹“;™Å¬ Ë’ÜÂ-ÄË]Üt): µµµ”——³uëV¶nÝJEE‘ÐfEEE£õ[·neçÎ|üñÇ­Þf¯^½…&bbb"ˤ¤$âãã0`½{÷Ž| y¨ 
>>ž¤¤¤o?؈Ú¼žü+pÒ’ÊÊJjœ„C*;w®.<ݽ{7›7o¦®®.ò½†a—p¥%½{÷¦ÿþ¤¤¤’’ÂÀ-Ã!Ó†ëSSS‰õÏ2’$I’$I’$éÈñ/’$I’$©Meeeüãÿà½÷ÞãÝwßåÝþ“wV¯fýÆT™Mv\Yõõ ­©á$ H÷/Ñ &iv¨ºº} T[€­@9°¹®Žò­[ùhëV6¬\ÉëqqÌJ«« íß§R#†çØ‘#9þøã9öØc1bÇw\«“ZÕ¶! á%^âvnçFnäU^åQe‚.Mê± )äQå ž _³ˆž:ƒ;w²qãFÊËË)..Ž,ËÊÊ(--¥¤¤„òòrÊËË …Böíß¿?Œ|¤¤¤0tèÐH€!Ü£ÿþ‘®IIIôïß?üèÎjkkÙµk;vì ²²’Ý»wGº¡ìÞ½›;v4 ÒóöÛoGB7 ;¡DEE‘ššJjj*™™™¤¥¥‘žžNFF©©© 2„ÔÔT²²²"AI’$I’$I’¤1"I’$I’"êëëùàƒX±b+V¬`ea!+ )­¨ ol,#bbQ]ÍçB!ކÙ@z}=ìÝdù‡¤µÿ£™Pª«¨6ïîÞÍ»o½Å;«Wó›ØX>¬©¡¦¾ž˜èhF ƘñãsÒIœ´ÿcРA4š®/†f3›Ó9/ð&0yÌc c‚.Mêqê©çë|ILâr.ºœ«´´”¢¢">úè£H7®õë×G>w¿€}5RSS#!ƒ!C†0~üxRSSÉÈÈ ==½QçŠîè8\±±±$''7êhr0jkk#‘­[·6 èlÚ´‰òòrV¯^Mii)ååå:ž$''“““ÃСC6lXäó¡C‡’““CZZÚ‘¦$I’$I’$Iê „H’$I’ÔƒíÚµ‹×_Å‹Sðò˼ñÆìÚ³‡Ø¨(ŽgLu5ß …|Ȩ­…“{’xà˜ýSÂ+÷w©Öo×׳rÝ:V|ô÷?ù$›ö‡I²ÓÒ˜4y2§LšÄ©§žÊ¨Q£œ„Û†39“·x‹«¸Š‰Lä.îb³‚.KêQæa )äMÞ$Ѝ ËéÖŠ‹‹y÷Ýwyï½÷þÕëÝwY¿~=UUUDGG“‘‘ œ{€ÀСC#'¢¢|¬:‹p@'55µÍmC¡P¤“K8ìò§?ý‰’’êëëèÕ«¹¹¹Œ1‚c=6Ò­lĈdddí¡I’$I’$I’¤NÂ@ˆ$I’$I=Hee%ûÛßÈÏϧॗXµv-uõõ|"!IÕÕ\ 18!"± vûJ0bÿÇtˆEÊ•Àee,ùãùþŸþÄöÚZú&&òé‰9íÌ39ûì³ÉËË#:::ÀtN©¤òÏq;·ó-¾Å+¼Âc<Æ]šÔíUPÁ­ÜÊòŸœÈ‰A—Ó-ÔÕÕñÁðÖ[o±zõjÞyçHø£²²€¾}ûF&ö_zé¥ ><Ò"++‹¸¸¸€G¡£%**Š´´4ÒÒÒ8ñÄ–_sÕÕÕlܸ1yÿý÷yï½÷X´h=ô»víþõ<:öØc9î¸ã8ñÄ=z4ŸøÄ'<ß$I’$I’$©›1"I’$IR7·fÍž{î9ž_¸‚%K¨©­å¤¸8Ψ®æ¿I@ºá£"8kÿuuÔk€‚={XòòË<´x1ßûÞ÷<`gŸwçþÛ¿qÖYg1hР ËîTbˆa6³9ƒ3ø_` c˜Ç<&0!èÒ¤ní{|xâù>ߺ”.©¢¢‚U«V±jÕ*Þ~ûmÞzë-Ö¬YÃÇLLL Ç{,Ÿüä'™:u*×]w]dâ~zzzÐ¥«‹'77—ÜÜÜ¿_RRé4^Λ7ÿøÇÔÕÕÑ»woFŽÉèÑ£5jT$(’œœÜÁ#‘$I’$I’$IGŠI’$I’º¡•+W2oÞ<æýæ7¬ß´‰AqqL«­åáPˆ³Ôêê Kì‘¢÷\ Au5«€ç·oçùyó¸ú÷¿§.âÔOšÏ]uÓ§O'555Ø¢;‰)La%+™ÉLÎà îâ.n࢈ º4©Ûùy„GxéKß ËéôªªªxóÍ7Y¶lË–-céÒ¥|ôÑG 4ˆÑ£G3iÒ$®»î:FÅÈ‘#éÕ«WÀU«;ÊÈÈ ##ƒ3Î8£Ñú={ö°fÍšH@iÕªU<õÔSlݺ€aÆ1qâDÆÏ„ ;v¬ÏQI’$I’$I’º!’$I’$uï¾û.¿ûÝï˜÷Ûßòκuä$$pÅÞ½\Œ«©!&èÕ¢Qû?nª­eð"ð¯¿Îw–-ã×_Ïg&OæŠ/|K/½”~ýú[lÀRIå9žãÿñÿøßâe^æ1#¯l.I·s;Ùd3“™A—Ò)}øá‡¼öÚk‘È[o½EMM ©©©Œ?ž¯|å+œ|òÉœxâ‰dff]®Dbb"ãÆcܸqÖ³jÕ*–/_βe˸ãŽ;ؼy3qqqŒ3† &0aÂN;í4rrrª^’$I’$I’$ˆI’$I’º°úúzþö·¿1ç§?噿þ•ÌØX¦×Ôð0iï^{'t1}K€KB!ªêêö…C^y…^}•ÿ¼þz®¼ê*¾~ýõŒ5*àJƒE7s3™Èçù¿æ×<ÆcÄt9®]»X¶lùùùäççSXXHll,#FŒàÔSOåßøyyy|êSŸ"*Ê#¯ºŽÌÌL2339çœs"늋‹),,dñâÅðÈ#PUUEnn.S§NeêÔ©œyæ™ 80ÀÊ%I’$I’$IR˜I’$I’º ;vðàƒòа±´”³bcY 
ño55D]œŽˆ^ÀÀuu̯­åœ_<ü0“'MbÖ·¿Íg?ûÙ;ùø Î`%+™ÉLNã4~ĸ‰›ˆ2%–ðrÉåJ® º”ÀÔÕÕ±xñbžyæ-ZÄŠ+ˆŠŠbܸqœsÎ9üÏÿü§œr A—*qáÈ\@UUK–,!??ŸE‹ñè£ …8餓˜:u*çw“&M":Ú3PI’$I’$I’‚` D’$I’¤.¤²²’û￟ŸÞuõÌ—kk¹^Sti:Š’oÿYSà ÀÏ—.eú%—0zäH~xÇœþùWŒÁ æYžå~îç;|‡%,á—ü’R‚.Mê’Þå]þÀø-¿%¶‡ý긶¶–—^z‰?ýéOüùϦ¬¬ŒãŽ;ŽiÓ¦që­·2yòd t™R‡ëÕ«gžy&gžy&Û·o祗^bÑ¢E<õÔSÜu×]¤§§sÑE1}út&OžLllÏzÿ$I’$I’$)H^²I’$ýöî<®«2ïÿøë+à†¸ )j©h®hi¢f¥¹„ÙbZM™3e6™•Õí]ÓdÓtÿ¤¼­i²Ål™¸­{šÓš,‹t*+)qÉ41Í,— ÷\sCùý¡r‹¢‚Ûy={œ~¯ss½ÏáÔ18ŸsI’¤ ++‹gžy†gÅC‡r÷¯¿²<;›ÀÙA‡Ó).ÞËÉa^n.õ¾ý–«zöäü6mHMM :^ B„Ì`¦0…9Ì¡5­™Îô cI%ÒÿãÿÑ”¦\ÇuAG9%rss™2e ¿ÿýï©Y³&Ý»wgöìÙÜsÏ=,Z´ˆï¾ûŽQ£FÑ»wo‹A¤}ªV­ÊÕW_ÍóÏ?ÏâÅ‹ùöÛo¹ë®»˜5k‰‰‰ÄÆÆrë­·òé§Ÿ’››t\I’$I’$I’N{„H’$I’TÌ}ýõ×´kÝš?ÿñü~óf–egóࣩ¥[Kà={˜ TKO§K—. 0€Í›7-èÄ<æO<s1I$±‡=AÇ’JŒå,ç_ü‹ÿâ¿(sšÿØxãÆ<ýôÓy3€,Z´ˆ?ýéO,[¶Œ9sæðç?ÿ™¦M›S*š5kÆÃ?ÌܹsYºt)C† aáÂ…tëÖfÍšñÌ3ϰiÓ¦ cJ’$I’$I’tÚ:½³'I’$IR ¶{÷nþô§?Ñ®m[ª|ÿ=é99ü¨t0+m€É99ŒÏÍå½×^#¾qcÞÿý c"†>àF0‚ÇxŒÞôf#ƒŽ%•£Eêp ×å¤ùúë¯éß¿?uêÔá‘G¡{÷î,X°€3fpÿý÷tD©DkРüã™9s&óçϧ[·n$%%Q§Nn¹åæÎtDI’$I’$I’N;„H’$I’T mذî]»òˆŒÊÉᳬ,JÅÚuÀ·ÙÙ\²a½zõbذaäææë” b0ƒ™Æ4¾áZÑŠ4Ò‚Ž%k[ÙÊ«¼ÊÝÜM8áAÇ9á–,Y 7Ü@›6m˜7oÏ>û,+W®äùçŸ'>>>èxÒi©eË–¼ð ¬X±‚§Ÿ~š¹sç’ÀUW]eaˆ$I’$I’$I'!’$I’$3K–,áÂvíX:kŸggs :Ô$ ;Õç&xmÏ’ss–”Dß>}ÈÌÌ<Å)Їv´ã+¾âΡ3I"‰=ì :–T,½Ê«d‘Å­Üt”jíÚµÜu×]4oÞœùóç3aÂæÍ›Çí·ßN¥J•‚ŽwD‰‰E»ƒµÿ©ÞŸJ¯¨¨(î¸ãÒÓÓIIIaíÚµ´mÛ–[n¹…5kÖO’$I’$I’¤Ï‚I’$I’БŋsA»vTûùg¾ÊÎ¦Õ Þ¨KQüˆý±îçd *SAå§"Ç eÏþýÎ;ÜØ·o©œ) :ÕyŸ÷Áçq.åRÖ²6èXR±²‡=¼È‹ÜÂ-Dtœæ­·Þ¢yóæLœ8‘矞ùóçÓ»wïS2v(Ê[ŽÕž=‡ÞAŽ´¿‚úÆárëþJ«ãý~eœ’ìòË/gÖ¬Y¼ûî»L:•¦M›òúë¯K’$I’$I’¤Í‚I’$I’Š‰Í›7sÙ%—ÐpëV¦dgSó$Œ‘{Àׂ–cñÉaÆ(N‚Êtð¹9•º)ÙÙ¼ÿÞ{$%%˜$X!B f0Ó˜ÆR–’@Ó˜t,©ØxŸ÷ù¸›»ƒŽrBdee1`ÀúôéÃï~÷;~øáHxxø)˰¿ïxŠñ>ù¤hw¢ößïpu¥Ui-¼£gϞǼŸ“!  
…ÈÈÈ W¯^DEEQ³fMn¼ñF6lØP`߃Û\ …0`Àaû¼÷Þ{\xá…y߃{ï½—­[¾Ë>kAm/åË—Ïë³`Á®¸â ¢¢¢ˆŠŠ¢G,X°ààÝ×¹9Ú8S§NÍ—o¿mÛ¶ŪU«òÚÂÃÃóúM:õˆÓÓÓéÞ½;‘‘‘T®\™Ë.»Œ?ü°PÇU˜ö-[¶pï½÷Ò AÊ—/O­Zµèß¿?_~ùe¾íÜþÀkàhçåà±—.]Ê5×\CµjÕ›óTéÕ«Ÿ}öŸþ9·ÝV¸ÿI’$I’$I’¤ÿcAˆ$I’$IÅÀ3#FpaXA=¾ZÐc€½€›ÀÌ}_ï9¨OîÁQQÆÜ ¬foÆàþƒú..~,Û÷ùZà†BæY$=÷mÿ#{ Oz+öõÉÚ7ÆßØoð/ X{P%÷€et!³á{öðcFï¼óÎ)­x b0ƒI#å,§­˜Â” c1xð`ž}öÙ×5Š;ððp/^Ìo~óî½÷^Ö­[Ç›o¾ÉðáÃó=Ì|óÍ7“Í”)Sزe Ë—/gðàÁŒ5êJŠ×y*Tár./Tÿâ|¾ýöÛüóŸÿä­·Þ"!!á˜öq2åæî½ 0€;3իWóÕW_±cÇî¿ÿþûÔ–›››·Œ=ú°ýaïƒö7ß|37ndæÌ™lܸ‘{î9ø.{ø¬µï_víÚEÛ¶mó®‡%K–˜˜HÏž=Y¶l?þø#7Þx#½zõbÅŠîïàñ snŽ6NçÎùꫯ¨\¹r^qì½>ªT©Â›o¾™×–MÓ¦Mùꫯèܹóaó}ÿý÷ôéÓ‡x€_~ù…™3g²}ûvzôèqÌçñ`7ß|3U«VeÆŒlÙ²…I“&±dÉÚ·oÈv]…=ÿû÷qçwrÿý÷³jÕ*&MštÄã8Ú¶mËøñãyýõ×ý»Š$I’$I’$IEÊ=Üo%$I b<㹞냎¢Ó@(büøñ\½×“$(99™C:e233©Z¹2/eeqË)ïpï€>Ú6gs謡¶-¨­0Ž4Æ»ì-ÌØo1ÐXy@ÛMì-Î|Ðön)D¦s€jHFîû¼è<ô>þ˜Ô9(÷‰:7Ç«GXU®¾šño½ÀèÅÓ¯üÊ0 <ÌÃü?þezJVVqqq|øá‡ÄÇÇçµoذ³Ï>›Å‹S£F úõëǹçžËþð‡¼>'N$999o憨¨(V®\IåÊ•Oùq¨diNsºÑQ®£8_§­Zµ¢eË–¼þúë'dÇ+ òà(âÝwߥW¯ÿ»›-^¼˜®]»²råÊBm¸çiÝ~›6mâì³Ï.pF’¢Œ0hÐ 6oÞÌØ±c¸ñÆ9çœsxàüwÐW_}•ôôtFŽYÐnòW˜sS˜qrss©U«ï¾û.çŸ>—\r >ø C‡%-- Ø[DqÑE±zõê#ÎñÛßþ–Ë.»Œ›nº)_¶¦M›æ;GE9·GEEñÓO?×¶|ùr4hpÔ1 {^ÜÇgŸ}vÄ"˜ üîw¿cÑ¢EÌ;7è(:ÁæÌ™@›6mN"I:’ý¿û|“7ÒS’$I’$ápÏ›:Cˆ$I’$I[°`»²²èx ÇÌ=h)ŒjìÁãd:Òú\Ÿ½³…ècöhìÈïðþ?S€>´_±oßûEïϰ·e2ù‹AŠ›N99|5cFÐ1Š•ÊTæMÞäE^äq'‘DVrU 4èÙFMÏž=©Q£Ó§O窫®Ê×§S§N¤§§ç}nÞ¼9÷ßÿ!˜KšÅ,±ˆ›¹¹ÐÛ×ëtùò大§sÛm·÷¾N¶NòßÍêׯÏêÕ§æ¿;ÕªUcãÆã¿“¿ñÆ|öÙg$''çµM™2…>}½ƒ^qÅ|üñLJ´¤0ç¦0ã„B!zôèÁ'Ÿ|ÀÊ•+Ù²e —\r åË—gùòå¼÷Þ{ôèÑãˆÅ Ÿ~ú)ݺuË×Ö¤I“£àE‡¸êª«ø÷¿ÿ7³I\\\¡Ç(êùo×®Ýñ>In»í6¾þúk~úé§ £H’$I’$I’TbX"I’$IRÀ~ùåbÌpðㆿ€3pöÎjqäÇ%‹®¨cT;ès9ͽž‚ÏcaÏíöš„Zbê[xˆ½³‰\µo»â,Xw> d Ó™ÎOüD+Zñ“cà@&L˜ÀúõëÈÉÉáÅ_äî»ïÎë“‘‘AãÆ …ByKttt¾‡¦ÇÇ/¿üÂÙgŸM³f͸å–[˜8qâ }xY%ßk¼Fsš“@B‘¶+Ž×éþ‡Ç›6mZämOµjÕòßÍÊ•+wRþÝüå—_0`gžy&áááy߇ãõÝwßñŸÿùŸ¼õÖ[TªT)¯}Æ Ô¯_?ß÷< Ë?þX¨}æÜvœË.»,¯ dìØ±üö·¿à†n`ܸq¼ÿþû\~ùåG͵~ýzbbb u Çê­·Þ¢]»vÜu×]DGGÓ©S'FŽIVVV¡¶/êù¯X±â >‚£yóæ…¾f$I’$I’$I’!’$I’$nÿ•ÛÎq ›Ø; Æ4 “¢Í$ä1ÀšÚ7aû:ƒJ.°ã ¾ßÃ9À;û¾gÛ€¨búhqІ6Ìe.]èÂå\NIäsJ3ÄÄÄpÍ5×ðòË/0qâDbccó½É½råʬZµŠÜÜÜ|KNÎÿe‹‹ãwÞaóæÍŒ7Ž:0|øp 
pJGÅ×nvó&oÒŸþEÞ¶8^§Õ«WÈ+RÜtÓMDFF2mÚ4233ó¾Çcûöí\{íµ<ñÄ´hÑ"ߺ˜˜6nÜxÈ÷<77—;¾ƒ»ÂŽÓ½{w¾üòKvîÜÉo¼Aß¾}¸öÚk™0a›6mbæÌ™tïÞý¨cV¯^ý˜¯­P(Ä®]»òµ4KK•*Uxúé§Yºt)Ë–-ã®»îbܸqy¹æTÿ“m‘ôþ§%I’$I’$IÒÑY"I’$IRÀš6mJ(âë ƒ`:ð{g½ß×v¢'<ctÞ- ý“Bn)0µ€ö/€óø¼èüp0¸•½…-Gs¢gZ)¬¹¡Íz€WùU¦2ãÇ«¼Ê_ù+‰$²šÕGßðÎ*d¶ã±x/,ŒK¯¸âŒVòõ£Ó˜FœË¹|ȇ§lì-ZдiS†Ê¢E‹¸þúëó­:t(>ú(¯½ö6l`ûöí|òÉ'\qÀ÷¶S§NŒ3†+VÍš5kxúé§éÔ©Ó);o˜@{ÚS‡:Ç´}q»NË–-Ëí·ßΈ#X¹rå1SIP§Nf̘AVV}ôguø;È\À<À?þÈîÝ»Y²d wÞyìwÙ—_~™ôôt^xá…×'%%1tèPþõ¯±aönÝJJJ }ûöeèСns,Š2Îå—_NRR7ÝtS¾ö¾}ûòÄOpÙe—j̇~˜G}”)S¦°}ûvæÏŸÏïÿûC ¡ ’˜˜ÈC=ÄŠ+ضm~ø!ûÛß ì;`À.\È®]»X»v-#FŒà’K.É×çp×À©:ÿ'ÓŠ+xê©§¸ýöÛ‰ˆˆ:Ž$I¥RyÊ“Y¨W]H’$I’¤âÄ‚I’$I’Š;q¡KOò8¡¾©üäìí¢=\ ìŒ÷ÀíB…l;Ñc®½0™½q@=à`l!3ÕÞÆ ZÀ£À(öÎP øŠüÅk€{g Èuð˜O¿Û·Ÿ»€‚­=±^v……qóÍ7Ÿ‚ÑNçqs™K7ºq—ñ ’CÎ){ðàÁ<þøãÜvÛm”-[6ߺøøx&MšÄøñ㉋‹ãŒ3Î`øðáÜwß}y}}ôQÞ}÷]ZµjETT;v$''‡7Þxã”äWñ–C)¤Ð›ÞǵŸâv>ôÐCÔ¬Y“«¯¾šmÛ¶×±¯ý³88›CAmEíûôÓOó»ßýŽJ•*q×]wågÔÿÿø™™™´oßž¨¨(®¾ú꼂œ£UPÛàÁƒùæ›o¨X±"¡P(oÙ¯~ýú¼ýöÛŒ7ކ R«V-}ôQFE¯^½8’¢œ›¢ŒsÙe— …èÝ;ÿµÞ«W/"""¸üò˘k¿–-[òú믓””DLL W^y%×]w]¾›ÃÃSO=EÙ²e9ï¼óˆå…^àþçé;eÊ*T¨ÀÅ_LåÊ•éС™™™¼öÚkùöw¸k °çåÀœE™qædÛºu+½{÷¦N:üéO…)¯•$I'C*°“AÇ$I’$IEÊÍÍÍ :„$•$!BŒg<×sýÑ;KG …?~ü!oS•¤Ò.99™C:¥²³³iOì²e|”ítÂü$„‡óŸ=Ä#<tœéüƒ;¹“¶´e,c©Mí“:ÞÎ;‰ŽŽfÉ’%Ô©sl38H‡“J*éÌw|Gšó~ŠãuºlÙ2.¼ðBêÕ«Çĉ©Y³fБ$Åš5kèÕ«Ë—/'--FI'Áœ9shÓ¦MÀI$IGòþÀt¦3ƒAG‘$I’$I8Üó¦>_"I’$IR1ÎkcÇ2-Â÷"ëDÙ \A£æÍy衇‚ŽSbõ£Ó˜Æ*VÑŠVü›Ÿ´±6oÞÌO˜<™÷*V$1"‚Ÿ‚¤å} Cx8ÕÏ9‡_}EÓ¦MƒŽtÚéG?f3› l ­˜Ä¤ #I…òoþM,±$t”S¢oß¾,^¼˜aÆ‘œœLÆ ùïÿþoÖ¬Yt4©TY½z5ÿýßÿMƒ xì±Ç¸ûî»ùî»ï¸á†‚Ž&I’K,kYK9AG‘$I’$IE`Aˆ$I’$IÅP÷îÝI›9“ qq´ç9`OСT¬­ú–)ÃU@ÏnàÓ/¾ fÍšAÇ:m5§93™É¥\Ê•\É`“EVб¤#ú„OèF7B„‚ŽrÊ”+WŽûî»%K–pûí·óì³ÏR·n]úôéÃgŸ}FnnnÐ¥ÓRnn.Ÿ~ú)×_=õêÕãÙgŸeÀ€,[¶ŒaÆQ©R¥ #J’¤ƒÔ¡Ùd³ŽuAG‘$I’$IE`Aˆ$I’$IÅT|||8çw 6ä¯ý+ݺu#==´´4n¼ñFÊ—/tLI’tû BV²2à$’$I’$©(,‘$I’$©hÙ²%ÓfÍ"åƒXÙ¢­ë˜t0r™À?€–ô/S† ûöeñ?0ò¹çˆŠŠ :^©ÕŒf|É—\Ã5ô¤'ƒÌnvKÊó ŸÐ†Ô§~ÐQŠsÏ=——^z‰µk×òÑGѶm[ž{î9š6mJ‹-xøá‡™:u*»ví :ªT¬ìÚµ‹Ï>ûŒ?ÿùÏÄÇÇÓ¬Y3FEûöíùøãY³f /¼ðçœsNÐQ%IRT Õ¨æ !’$I’$•0„H’$I’T‚\~ùåÌþúkÞž0eÍ›sp^D¯;‚§“j9ð 
pVDÂÂhý›ß°ðÛoùß×^#...èxbïÃ3É$ówþÎ+¼ÂE\Är–KàS>¥Ý‚ŽQ,…‡‡“˜˜Èßþö7V®\ÉÔ©SéÚµ+o¼ñ]ºt!::š=zðä“O2wî\öìÙtdé”Ú³gsæÌá¯ý+—^z)ÑÑÑtíÚ•ñãÇsÉ%—ššÊªU«x饗¸ä’K:²$I:Fu¨Ã VC’$I’$!’$I’$•0¡Pˆ«¯¾š9óç3cÆ â¯»Ž»""83<œ»i€ªž¶ÿ® ãìPˆ15jðÿõ_ü”‘Á˜±ciÒ¤IÐU€~ôã+¾"“LZÓšñ¯ #©”ÛÊVf3›®t :J±ÆÅ_ÌsÏ=ÇÒ¥KY¶l#GޤjÕª<ùä“´iÓ†š5kríµ×2bľøâ vì°$S§—íÛ·óùçŸóä“OrÍ5×P£F xê©§¨^½:Ï=÷Ë—/gÉ’%Œ9’N:Q¦Œ¿n’$étД¦|Ë·AÇ$I’$IEàkš$I’$I*ÁÎ?ÿ|Î?ÿ|ž~öY^yåÆüïÿòÂ÷ßsV¹r\¿k}€¶A‡T‘d“€qeÊ ‘ $vëÆ›Ò«W/ߺ]B4¥)_ò%CÂõ\Ï=ÜÓŸ|ò Ÿþ9O=õkÖ¬!<<œ–-[Ò¡CÚ·oOûöíiܸ1¡P(èøÒQåææ²xñbf͚ŬY³˜1c , ;;›ZµjѾ}{þüç?Ó­[7Z¶léu-IÒi®%-y׃Ž!I’$I’ŠÀ'$I’$I: œqÆ<øàƒ<øàƒ,\¸·Þz‹qÿøO-_N͈ºgeÑHªV‡X| L ãÃPˆm99thÛ–Çûö¥oß¾Ô¨Q#èˆ:å)ÏHF’@ƒÄt¦3žñ4 AÐÑTÊÌd&u©K-j¥D …Bœ{{î¹Üwß}¬ZµŠ9sæ0gÎÒÒÒøßÿý_vîÜITT7¦yóæ´iÓ†øøxZµjELLLÀG¡Òì×_å‡~`áÂ…Ì™3‡o¿ý–yóæ±~ýz"""8çœsèÔ©÷Þ{oÞu+I’J—´`ËØÎv"‰ :Ž$I’$I* B$I’$I:ÍÄÇÇORRsæÌ!%%…Éï½Gßyó(\Î¥»wÓHÊœ·4ZL>>Œˆàû¬,¢*Tà’K.á¯W\ÁUW]E­Z>¸}º¸‰›hG;®çzZӚьæ:® :–J‘YÌ¢=탎qZª]»6µkצgÏždee1oÞ<¾þúkÒÓÓùæ›oxÿý÷Ù¼y3°wÆ‘–-[Ò¢E š7oN£FhÔ¨ÕªU ò0tšÙ´i?üðßÿ=‹-â›o¾á›o¾áÇ jÕªœsÎ9´lÙ’ë®»ŽÖ­[ÓªU+"""‚ .I’×’–ìa‹XD AÇ‘$I’$I…`Aˆ$I’$I§±6mÚЦM†ʆ øè£ø÷äɈykBf1‹! 
áz®g Å(ÊR6èh*f3›?òÇ c” ´mÛ–¶mÛækÿé§ŸòÊOOOgâĉ<õÔSìÚµ Ø;ÓW£FhÒ¤I^‘HãÆiذ!‘‘¾™Y‡Ú¾};K–,á‡~È+þØ¿¬_¿€råÊѨQ#Z´hÁí·ßNË–-iÙ²%uëÖ 8½$I*®ÒH"ù†o,‘$I’$©„° D’$I’¤R¢zõêôíÛ—¾}û°jÕ*ÒÒÒ˜6mŸ|ò #¾ý–=¹¹ÔŠˆ MNñ{öÐh4BA†/!²ÅÀà[`aÙ²ÌÌÍe}VaaœÓ¢—^|1Ã.ºˆnݺl`Rå)ÏHFÒ‰NÜÊ­ÌaãOCM§±å,g-k9ŸóƒŽRªÕ«Wzõêqå•Wæk_µjß~û-Ë–-Ë[ÆŒÃâÅ‹ÉÉÉ ZµjÔªU‹ÚµkÓ A4hïsýúõ)S¦L‡¥“hÓ¦M,[¶ŒU«V±zõê¼ëcÿçåË—“›› @­Zµˆ§E‹\uÕU4hЀæÍ›Ó´iSÂÂÂ>I’T’”¡ ÍhF:éAG‘$I’$I…dAˆ$I’$I¥TíÚµ¹îºë¸îºëظq#_~ù%óæÍãë¹syçË/yòçŸÙ“›KÕðpš–)CÓÝ»i ù–rCP6°wÖïö}ý¾LGDð}VY{öP±\9Z4kFëöíÖª­[·æ¼óÎsp-×Ò’–ô¡çqÉ$Ó‡>AÇÒij³ˆ ‚Ö´:Š P»vmj×®}Hû®]»Xºt)Ë–-ã§Ÿ~âçŸæçŸfÁ‚¤¤¤°zõê¼b€òåËsÖYgQ³fMjÕªE­Zµ¨Q£uêÔÉ÷µfÍšŽ,''‡uëÖ±víZV­ZźuëX¹r%k×®eõêÕ¬Y³†5kÖ°bÅ 233(S¦ ±±±Ô¯_ŸºuërÑEQ·n]êÖ­KÆ iРåʕƿI’¤“å.à ¾:†$I’$I*$ B$I’$IÑÑÑôèу=zäµmÛ¶ùó瓞žÎwß}Çâ… I]´ˆŸV¯fOn.eB!Ίˆà, ÞîÝœ œ ÔÛ÷µp%kv‘]À:àç}ËŠ}ËOeʰ""‚÷ìaCVÊ–¥qƒ4nÑ‚ÞMšϹçžK“&M|#·Ž¨1™Á †0„¸ø€—y™ T:šN3_ò%-hAE*EEP®\9š7oNóæÍ \¿{÷n222ò E222X·n«V­bΜ9¬Y³†Õ«W³cÇŽ¼m¨Q£Õ«W§zõêDGGç-111DGGçk¯^½:QQQDEEªÃ.Q¶nÝʯ¿þÊÆÙ¸q#6lÈûºÿÏ·¯[·§õê IDAT.o怊+R»vmjÖ¬Ill,­[·¦FyuëÖåÌ3ϤlÙ²©$I*m.æbžçy6³™ªT :Ž$I’$I: B$I’$IÒaUªT‰ .¸€ .¸ _û®]»øá‡X¼x1K–,aÅŠü´|9.[ÆŠU«X¿eK^ß2¡1ááÄ”)CLn.1YYÔÈÍå  Pˆ*Q@å}ÞÿèrUò”D‘ÿ;Ø[Ä‘—m_Àæ}Þl¶íûó6`ýþ%,Œõaaü ±.'‡­ÙÙyû  £VL uëÖ嬆 ézÖYÔ«WÆÓ¸qcêÖ­K(T’Ê]Tœ”§<#ÉÅ\Ì­ÜÊB2žñœÍÙAGÓidó8ó‚Ž¡¬lÙ²4l؆ ±ßÖ­[YµjU¾(,VÈÈÈ ===¯ˆá×_-p?U«V%22’J•*Q©R%ªU«FÙ²e©V­•*U¢jÕªTªT‰ˆˆ"##)[¶,åË—§B… „‡‡ç•T«V €Ê•+ç+œ¬Zµj÷Óˆˆ*UªT`¦mÛ¶‘µ¯@ó@¹¹¹lÞ¼9ïsNNNÞqmÚ´)ï¼dgg³sçN233Ù½{7Û·o'++‹mÛ¶±iÓ&¶oßζmÛØ¶m›7oÎûóöíÛóíÿ@UªTÉ+¦Ù_\Ó²e˼¶ØØXj×®7sËáŽM’$)Hé À|AOzF’$I’$•!’$I’$©ÈÊ•+G‹-hÑ¢EëwîÜÉÏ?ÿœ÷ðéúõëó-‹W®$mݺ½Yþú+Û33Ù}@!ÆÉR©B*–/O¥Š©Ã±±T¯QƒF11ÄÄÄP³fMbbb8ãŒ3¨W¯±±±Îô¡“î®á<Îãnà<Î#™dnà† cé4±€ô¢WÐ1¨¨(š4iB“&M Õ?;;;ßÌ[·neëÖ­ù "¶mÛÆÂ… ™2e ­Zµ"22’-[¶ð믿æ_äää°cÇvíÚuôAT®\9*V¬˜W´FåÊ•©R¥J^ñKÍš5ó _*UªDdd$U«V%**ŠÊ•+ç›i%<Ü_¹H’¤’/šhâ‰'•T B$I’$I*üí„$I’$I:á*T¨P¤P¼·roÙ²…;v°sçÎ|oõÞoóæÍäæææ}Þÿ0ç~û愽oê®X±"+V¤jÕªÇyTÒÉSŸú¤’Ê<@_ú2‰Iü¿Q‘ŠGßX:Œ5¬á~¡ïI §FÔ¨Qã°}’““yæ™g¸öÚk=z4‘‘‘GÜçþÙ7öìÙÖ}3ˆx/ÏÎÎfëÖ­n{¤¢’ƒïÿŠŠŠÊ+Î…By¨R¥ eʔɛÅD’$IëLgRI :†$I’$I* B$I’$IR±AµjÕ¨V­ZÐQ¤@”£#Iºð{~O;Ú1žñÄt4•P X`AˆNˆÌÌLî¸ãÆŒÃðáÃyà…BGÝ®lÙ²yÅÕ«W?Ù1%I’tt¡ /ò"ëYO 
1AÇ‘$I’$IGP&è’$I’$I’þOozó5_EèÀXÆI%ÔC 5©t•ptìØ‘””&OžÌ!C U "I’¤’éR.¥å˜ÈÄ £H’$I’¤£° D’$I’$I*fêQÏùœA âFn¤ýØÁŽ c©„YÈBZÒ2è*áRSSIHH ++‹Ù³g“˜˜t$I’$d©Hwº3 AG‘$I’$IGaAˆ$I’$I’T EÁ_ø ïð)¤Ð–¶,`AбT‚,`ñÄC%Xrr2‰‰‰tíÚ•´´4ââ₎$I’¤Sä®ác>f3›ƒŽ"I’$I’ŽÀ‚I’$I’$©ëE/æ1*T¡Ø #©„XÌbšÑ,è*233éß¿?ƒ bذaŒ;–ÈÈÈ cI’$éêIOB„˜Ä¤ £H’$I’¤#° D’$I’$I*æêR—Ïùœ?ðúíûg;ÛƒŽ¥bl3›ÙÄ&Ò0è(*a222èØ±#)))Lž<™!C† …‚Ž%I’¤S¬*UéB&0!è(’$I’$é,‘$I’$I’J€pÂI"‰wy—ø€¶´å¾ :–Š©%,° DE’ššJBBYYYÌž=›ÄÄÄ #I’$)@7p)¤°žõAG‘$I’$I‡aAˆ$I’$I’T‚\ÅUÌcÑDÓžö$“t$CKYJaÔ¥nÐQTB$''“˜˜H×®]IKK#...èH’$I XúP‘мÆkAG‘$I’$I‡aAˆ$I’$I’TœÅYLe*ðwr'ýèÇv¶KÅÈR–rgQ–²AGQ1—™™Iÿþý4hÆ cìØ±DFFK’$IÅ@*ð[~ËK¼D.¹AÇ‘$I’$I° D’$I’$I* '‰$&2‘IL"æ3?èX*&–±Œ†4 :†Š¹ŒŒ :vìHJJ “'OfÈ!„B¡ cI’$©¹ÛYÊR¦25è(’$I’$©„H’$I’$I%Ø•\É<æC íiÏHFIÅÀR–Z¢#JMM%!!¬¬,fÏžMbbbБ$I’T µ¤%èÀ˼tI’$I’T B$I’$I’¤îLÎä3>cC¸û¸‰›ØÆ¶ c)@?ó3õ¨t SÉÉÉ$&&ÒµkWÒÒÒˆ‹‹ :’$I’б;¸ƒwx‡¬:Š$I’$I:ˆ!’$I’$IÒi œp’Hâ#>âc>&ÒI:–²šÕÔ¦vÐ1TÌdffÒ¿ İaÃ;v,‘‘‘AÇ’$IR1×—¾ÄËFE’$I’$Ä‚I’$I’$é4Òn|ÅWÔ çs>#t$b›ØÄNvZ¢|222èØ±#)))Lž<™!C† …‚Ž%I’¤ ‚îã>þ‡ÿa낎#I’$I’`Aˆ$I’$I’tš9“3ùŒÏÂîã>~ÃoØÂ– céYÅ* B”'55•„„²²²˜={6‰‰‰AG’$IR s;·S™Ê¾t@’$I’¤bÆ‚I’$I’$é4FI$ñ1“FíhÇ<æK§ÀjVP‹Z'QqœœLbb"]»v%--¸¸¸ #I’$©*Oyþƒÿàyžg3›ƒŽ#I’$I’ö± D’$I’$I:u¥+é¤Szt ƒos-V±Šr”#šè £(@™™™ôïߟAƒ1lØ0ÆŽKdddб$I’T‚ÝÅ]„æÿWJ’$I’TŒX"I’$I’$æjPƒÉLfCøà®ñ®§±Õ¬¦µ :Š’‘‘AÇŽIIIaòäÉ 2„PÈëA’$Iǧ2•ù#d#XŪ ãH’$I’$,‘$I’$I’J…0ÂH"‰ù˜™Ì¤­˜Å¬ cé$ø…_¨IÍ c( ©©©$$$••ÅìÙ³ILL :’$I’N#÷r/1Äð0E’$I’$aAˆ$I’$I’Tªt¡ ó˜Gšp13’‘AGÒ ¶ Dt  99™ÄÄDºvíJZZqqqAG’$IÒi¦<åyœÇy׸Н‚Ž#I’$IR©gAˆ$I’$I’TÊÔ ÿæß<Â#ü?p5W³‰MAÇÒ ²‰M„”2™™™ôïߟAƒ1lØ0ÆŽKdddб$I’tšêC:Ðû¹?è(’$I’$•z„H’$I’$I¥PˆC¦ð%_ÒšÖÌdfбtlbÕ¨t "tìØ‘””&OžÌ!C…BAÇ’$IÒi,DˆŒàs>ç_ü+è8’$I’$•j„H’$I’$I¥Xg:3y4£èÄ7s3ÿÁ8ã¤$I’$I² D’$I’$I*åÎà &1‰'y’ÿ⿸š«ÙÈÆ cé9CH霜Lbb"]»v%--¸¸¸ #I’$©”y–g)Cîçþ £H’$I’TjY"I’$I’$‰!3˜)Lá+¾¢5­™ÁŒ céld£!§±ÌÌLú÷ïÏ Aƒ6lcÇŽ%222èX’$I*…ªP…—x‰Wy•ø(è8’$I’$•J„H’$I’$IÊÓ‰NÌcñÄÓ‰N$‘ÄöK…”E;ÙIªE'AFF;v$%%…É“'3dÈB¡Pб$I’TŠõ¤'×r-È6¶G’$I’¤RÇ‚I:>ÿÏ|ðÁ®KOOçÌ3Ï$;;€ÔÔTÚµkGùòå©_¿>¯¼òJ¾þ[·nå Q£FT¬X‘*Uª˜˜HJJÊI?I’$I’ C ð#Ácþøc’’’¨W¯DGGså•WòᇞÒc’$I’$© íhÇlfÓ’–t¦3I$±‡=AÇÒaìŸ!¤N¢!33“þýû3hÐ † ÆØ±c‰ŒŒ :–$I’tˆ>ôa¸…[ø‘ƒŽ#I’$IR©aAˆ$ƒ°ˆ0 
tÈ,!£G¦gÏžÔ¨Q€éÓ§sÕUWåëÓ©S'ÒÓÓó>7oÞœû￟•+Wžüà’$I’$ƒbH!…Œà1ãR.e-kƒŽ¥ì/q†’/##ƒŽ;’’’ÂäÉ“2d¡P(èX’$IÒa=ÇsÔ£}èCYAÇ‘$I’$©T° D’ŽÑÀ™0aëׯ ''‡_|‘»ï¾;¯OFF7& å-ÑÑѬ^½:¯Ï¸qãøå—_8ûì³iÖ¬·Ür 'N$77÷”“$I’$I‡"Ä`“FKYJ LcZбtìœ!¤¤KMM%!!¬¬,fÏžMbbbБ$I’¤£*OyÆ2–, ‰¤ ãH’$I’T*X"IÇ(&&†k®¹†—_~€‰'K»víòúT®\™U«V‘›››oÉÉÉÉëÇ;ï¼ÃæÍ›7n:t`øðá 0à”“$I’$IGÓ–¶|Í×´§=]èBIìaOб´ÏþB,)¹’““ILL¤k×®¤¥¥t$I’$©ÐšÓœçxŽ¿ð&21è8’$I’$ö,‘¤ã0xð`^zé%²²²5j÷ÜsO¾õ]ºtaâÄÂý ³\¹rœ{î¹ 8É“'3~üø“Y’$I’¤ãV…*¼Å[Œ`ó8ÝéÎÖKÀ.v{ßÌ«’%33“þýû3hÐ † ÆØ±c‰ŒŒ :–$I’Td·r+ÈÜÈ|æG’$I’¤Óš!’tZ´hAÓ¦M:t(‹-âúë¯Ï·~èС<ú裼öÚklذíÛ·óÉ'ŸpÅWäõéÔ©cÆŒaÅŠdgg³fÍž~úi:uêtªG’$I’¤B b0ƒI#å,'¾à‹ c•zÙdNxÀITtìØ‘””&OžÌ!C…BAÇ’$I’ŽÙs­b(a ²!„|)!Û' ÷o.®‘™÷œó¼'C0ÃyæÆ4äAåÑ ãH’$I’T©T :€$UT¹¹¹AG$I’$©\ªCf2“$’¸™›YÄ"^äE∠:Z¥—CŽ«ƒ”cÉÉÉ 8¸¸8–/_NË–-ƒŽ$I’$Wr%ÛÙÎMÜD 1 ehБ$I’$Iªü˜8I’$I’$IÇÄ0†±Œe¤‘F;Ú1ŸùAG:!„pµ‰ò())‰ÄÄDzôèÁÒ¥K-ƒH’$é„3’‘ÜÎí g8oðFÐq$I’$Iª,„H’$I’$I:fÎäLRH¡=èMo~ÃoÈ!'èX•Vžßr&33“!C†0bÄ&NœÈŒ3¨Y³fб$I’¤@ü–ßr-×r)—2—¹AÇ‘$I’$©Â«tI’$I’$I•[êð/Ñ›Þ g8ð3˜Ac­Ò©BrÉ%‡"‰ :Î /--‹.ºˆÔÔTæÍ›GbbbБ$I’¤@…ñ'þDª0€Ìd&ýét,I’$I’*,W‘$I’$I’t\ f0KYÊ6ÐŽv¼Å[AGªtªø W ^rr2;v$++‹åË—[‘$I’âü‘a ã.a3‚Ž$I’$IR…e!D’$I’$IÒqÓžö¤BOzrç1ŠQd‘t¬JãઠÙdœäÄ–””Dbb"=zô`éÒ¥´lÙ2èH’$IR¹"Äø#É`ó/I’$I’¤ ©JÐ$I’$I’$XjS›Ìà—ü’¸ù˜Ì  M‚ŽVá¹BH°233>|8/¼ð÷ß?cÇŽ% K’$I*—B„xœÇ bCÈ$“ë¸.èX’$I’$U(B$I’$I’$b0ƒéHG2v´ãyž§7½ƒŽU¡,„¸êÊñ—––ÆE]Djj*óæÍ#111èH’$IR¹w°R‡: cØÀ=Üt,I’$I’*Œˆ H’$I’$I:qÎé|ÀœË¹œÏùŒb”e†£Pjd’p’Krr2;v$++‹åË—[‘$I’Jé>îãža"¹†kÈ&;èH’$I’$UB$I’$I’$ªµx˜Æ4žáÎælÖ°&èXRmj°‹]'9q$%%‘˜˜H=Xºt)-[¶ :’$I’T!]Ã5¼Â+¼ÌË\ÄEìfwБ$I’$I*÷,„H’$I’$I*3˜å,g7»9‹³ø :R…S‹Z€…ã!33“!C†0bÄ&NœÈŒ3¨Y³fб$I’¤ ­/}y›·YÆ2zЃl :’$I’$Iåš…I’$I’$IåÆù!ÿäŸô§?}èÃ(F±}AǪ0\!äøHKK£K—.Ì;—yóæ1nÜ8B¡Pб$I’¤J¡XÊR2Èà,Îâ_ü+èH’$I’$•[U‚ I’$I’$I‡ªNu’HâlÎf#xŸ÷™Å,ZÒ2èhåÞÁBv²3à$•Wrr2$..ŽåË—Ó²¥¯KI’¤Ê(==Ù³gS­Zµ £œ°FVÉ3ÝŸáçñ?gÐâAüì?? :’$I’$IùìÝ»—K.¹„ØØØÀ2X‘$I’$I’T. 
f0?á' d íiÏT¦r1«\«A "ˆp…c$))‰o¼‘0uêTjÖ¬t$I’$#ëׯ§S§NtèÐ!è('´‘ŒäNîä¡nQ»[mã1"ˆ:–$I’$I¤¤¤°~ýú@ !þ”,I’$I’$©Ü:Óø¹š«È@F1Š}ì :V¹"DMjZ)c™™™ 2„#F0qâDf̘aD’$I:"‰äAdÓH"‰ó8Íl:–$I’$I内I’$I’$IåZuª3™É<Çs<˳t¦3_óuбʭÚÔ¶R†ÒÒÒèÒ¥ sçÎeÞ¼yŒ7ŽP(t,I’$é„r5W³ˆEü‡ÿОö,fqБ$I’$I*,„H’$I’$Iª1ˆñ/ö±ö´g6³ƒŽT.Õ¢;ÙtŒJ!99™Ž;’••ÅòåËILL :’$I’tÂ:‹³ø˜éD'zЃñŒg?ûƒŽ%I’$IR ,„H’$I’$Iª0~ÀøÂ2빞}ì :V¹R‹Z|ÇwAǨ𒒒HLL¤G,]º”–-[I’$I:áÕ¡³˜ÅÃ<Ì<@ú°…-AÇ’$I’$)0B$I’$I’$U(ÑD3™Éü•¿2‹Yt¢«Yt¬r£6µÙÅ® cTX™™™ 2„#F0qâDf̘AÍš5ƒŽ%I’$é€!ná³˜ÏøŒ¶´åïü=èX’$I’$ÂBˆ$I’$I’¤ iø'ÿ$‡ÎäLf1+èHåB-j±“AǨÒÒÒèÒ¥ sçÎeÞ¼yŒ7ŽP(t,I’$I…ø)?å>¡}8ŸóÌ`Ëñ’TÆB¡¡PˆÖ­[“]äóÇòØe¿’$IA±"I’$I’$©ÂjCÞç}†0„˸Œë¹ž½ì :Öqµí¬bÉ$3“™l`ò!×r-çs>íiOsšó!µ\KNN¦cÇŽdee±|ùrƒŽ$I’$©u¨ÃÓ<ÍlfówþN[Ú²˜ÅAÇ’¤JgõêÕ<÷ÜsAǨ4Ê¢”b±E’$d!D’$I’$IR…M4“™Ì+¼Â˼L':ñ_ë¸ø”O©G=H ݸ‚+ø”OI%•éLçïüù˜µ¬¥ U‚Ž[n%%%‘˜˜H=Xºt)-[¶ :’$I’¤R¸˜‹ù„OhCzЃ;¹“L2ƒŽ%I•Êoû[²²²‚Ž!I’¤ÃX‘$I’$I’T)\ÄE¬`QDq&g2“™AG:æÚІ4Èû}.¹d‘Å^ö’MvÞ㵨E;Ú±\ËÌÌdÈ!Œ1‚‰'2cÆ jÖ¬t,I’$IG 1™Ç<&3™)L¡=í]-D’ÊHBBkÖ¬áÙgŸ :Š$I’c!D’$I’$IR¥Ñ‚$“Ì5\Ãå\Î`³›ÝAÇ:fªR•ÑŒ.võ"èJW"‰<ŽÉÊ¿´´4ºtéÂܹs™7oãÆ# K’$IÒQb#øœÏ9ÓèJW®çz¾åÛ £IR…v÷Ýwpÿý÷³wïÞÇïÛ·‡zˆvíÚQ£F jÔ¨A»víxä‘G ¬2’ššÊ…^H­ZµhР7Üp»wý~Þ /¼@·nݨ[·.ÕªU£uëÖŒ;–;v”jN»wïæú믧AƒÔ®]›þýû³fÍš#žG¸c}ÿ) åûÍ“O>ÉÏþs6lHÕªUiܸ1_|1 ,k‡>¾gÏnºé&5jD•*ÿ{qÉ’% 4ˆøøx¢¢¢¨_¿>½zõâÿøGsup_áœ3I’tü…rsssƒ!IIˆ³˜Å@E•@(bÖ¬Y èëI’•””İaÂŽ!I’*¸×x¡ ¥-˜Å,NåÔ #ßò-iÌw|WèóQDñq ·çdåWrr2$..Ž×^{–-[I’*­””:tèpI*¿U/ò"·p ըƓ¸P(Äk¯½F¿~ýò=%Ÿ3I’NDÇóýƒ¢®7u…I’$I’$I•Rú³‚T£èÀK¼t¤c¢uÆ0¢ˆ*ôù,²èF·ãªKJJ"11‘=z°téRË ’$IR%v%Wò_pçÑ—¾ô¡©¤K’*œˆˆˆ¼UB~÷»ß‘™™YäØÇœE‹Q·n]ž}öY¾ùæ¾ùæžV9óJ IDATyæêÔ©Ã{ï½Çã?ÀÃ?̺uëhÞ¼9 .dçμ÷Þ{üë_ÿ*°ßgžy†3fФI^~ùe6oÞÌîÝ»Y¶lguŸ}ö<ð@ØsJIIÉwÌfÍš±nÝ:zè¡RÏ£4c-Ïäæææû0sæLÆϺuëØ·o[·nåÍ7ߤW¯^aíãP|ðûÛߨ¹sg¾ç{÷îͼyóؾ};ûöícݺu<öØcäæærÿý÷Ñ9“$IÁ°"I’$I’$©ÒjNs²k¸†+¹’Á f7»ƒŽUæF3šýì/ô¹ÚÔæÇüø8'*2332d#FŒ`âĉ̘1ƒš5kK’$IÒ1C Oó4ïñ©¤’@ãO&E_Ì,I*èÒK/å´ÓNcÆ üéO*rÜÁ<{ì1®¹æN>ùdN>ùd®½öZ&Mš”oÌœ9s˜2e ]»v¥V­ZtëÖÉ“'ØïÔ©Sx饗¸ä’KhРÕ«Wçç?ÿy^‰âðÕ+ŠSÔ1ß|óÍRÏ£4cKÒ¬Y3–-[Æ£>ÊóÏ?϶mÛ¸à‚ ˜?~Øó;tžçwµjÕÊ÷ø­·ÞÊ“O>Éi§F5ˆç–[¾_aøßÿþw‘û*îœI’¤`„r «…J’Š"Ä,f1%–JPÔ^’t¢KJJbذaAÇ$I•̼Á5\Cq¼ÌË$t¤2u—ñ*¯’EVÞcDp>ç3‡9& 
^ZZ]t©©©¼ôÒK$&&I’N)))tèÐ!à$’T:~ÿªœ²ÈâIžäîá$Nâ·ü–Á :–$•[¡PøßŠ/¾ø"W]u5â믿Îû°C/A¬^½:™™™lÞ¼™ äÛßæÍ›iذ!Õ«Wg÷îÝDGG³wï^222¨[·nÞ¸ŒŒ êÕ«—oß5kÖd÷îÝDFFæ{üЕ1¢¢¢Ø·o_Xs*ê˜ÑÑÑìÙ³§Tó(ÍØÂÎë¡–-[Æ•W^Éš5kò=þË_þ’çž{ކ –¸CŸß¶m111ùž›5k—_~y‘Û¾ßpÏ™$I'¢ãùþAQ×›ºBˆ$I’$I’¤B?úñ1s'ñ3~Æ‹¼t¤2u;·“Mv¾Ç"‰äü" DåCrr2;v$++‹åË—[‘$I’N`QD1ŠQ¬béÌÕ\Mozó)ŸM’*„Ë.»Œ6mÚ°iÓ&žxâ‰ãz샅œœrrrØ¿?û÷ïÏW\ÈÊÊ*jó £S§N|ùå—,]º”‡~˜‹/¾˜ZµjñÖ[o1jÔ¨Rïïð2À}÷ÝGnn.Çç‹/¾`Ïž=äææ²k×®²˜‚$I:Î,„H’$I’$I:a4£‹XÄHF2ˆA f0ßñ]бÊÄù1]èBªä=–EÝé`ª`%%%‘˜˜H=Xºt)-[¶ :’$I’¤r  Mx‰—x÷ø†ohG;†1ŒtÒƒŽ&IåZdd$wÝu?üp¡cÚ´iÀßÿþ÷ÏÍ;€üàyïÕ,^¼8߸äääÛ&$|¿Úï?ÿùϼUA û®¢Žy0SiæQš±ð¿7²³³ Œ¨R¥ :ub̘1Ìž=›•+Wðü#ì}gõêÕ<øàƒ´iÓ†èèhÞyçb·+éœI’¤`X‘$I’$I’tB©BäA^çuþÆß8‹³*ͧÁŽc\¾UBêP‡38#ÀDÇΊ+xê©§ }.33“!C†0bÄ&NœÈŒ3¨Y³æqN(I’$©¼ëF7þÅ¿x‰—x›·iMk~ÃoØÉΠ£IR¹uÅWкuk¶lÙRäó¿þõ¯yþùçÙ²e [¶laÚ´iŒ=:ߘ¾}ûpÓM7‘œœÌ®]»X¸pa¡+aŒ1"o›©S§²fÍöìÙÃÞ½{ùÏþßÿüg:uêö<Š:fŸ>}J=ÒŒ8ù䓘={6™™™ùrsÎ9¼ð ¬[·Žììl6oÞÌ_ÿúW€|c‹ÛGIš6m |_êÙ¶mÛ·ogöìÙ 6ì¨Î™$I F(·4µXI!BÌbtU¡PˆY³f1p ¯'I:TRRR‰o8J’$•…4Ò¸ŒËXÁ &3™_ñ«cÞäMîáÞáêQ/€”áË%—38ƒÏù€>ôáu^8UÙÛ»w/ ¬^½š¿üå/ 2$ï¹´´4.ºè"RSSy饗HLL .¨$‰””:tèpI*¿xö°‡ÇxŒ‡xˆÚÔæîá®!Ѝ £IR`®Bqø%†Ï=÷\¾÷c}~ïÞ½$&&XMâ nݺ1þ|ªV­ÊÖ­[i×®ëÖ­Ë7梋.âÕW_-°ï›o¾™)S¦›¹¤Ë!Ωÿþ¼öÚkùž‹gåʕԫW¯Tó(ÍX€k®¹†iÓ¦šý`¾Â 6Œ§Ÿ~ºÄ}:ÏÂÎÇ#<ÂØ±c <>xð`žþùÛ…{Î$I:Ï÷ŠºÞÔB$I’$I’$°šÒ”d’ËX†3œÁ æ;¾Ë{~ k¸’+ù˜ÉÈ“†'DˆÛ¸-ï¿Á/ÈÊÊbÙ²e<ðÀœ{î¹Ô©S‡öíÛsË-·ðÆo°mÛ¶ c—ÚC=Äš5k€ïÿ!|ùòå$''Ó±cG²²²X¾|¹eI’$Ia«Nuîà¾äK.äBnâ&~ÈyØÏþ ãIR¹rå•WrÊ)§ú\µjÕxûí·yðÁiÛ¶-ÑÑÑT¯^¶mÛòÐCå+FÔ¯_ŸE‹Ñ·o_jÖ¬I½zõøÕ¯~ÅôéÓ Ý÷þð,XÀÅ_L“&MˆŠŠ¢zõê$$$0zôh>þøã°ç0}út†JLL 5kÖ¤_¿~,Z´(¯ØPšy”f,Àã?Î 7Ü@óæÍ‰ŠÊ_<|ÿý÷:t(-[¶$**Š ЩS'ž~úiž|òɰöQ’Ñ£GóÈ#pê©§R­Z5ZµjÅ„ xæ™gŽêœI’¤`¸Bˆ$•’+„¨,¹Bˆ$ÎB$IRæ2—! ádNæe^æ4NãlÎf+È" €ÙÌæb.8iñ²È"žx¾ánþóÍ|þ×ÏYºt)ß}÷7¦[·nüìg?cõêÕ,\¸ÿûß´mÛ–®]»Ò½{wºtéRnÿ!÷Ë/¿$!!¬¬ï¿&‘‘‘Ô«WÛn»»îº‹0uêTjÖ¬pRIø û’*.¿i-k¹Ÿûy–g9•S™À.æbBýÉí’$UFÅ­6"IÒ‰®<¬Rå˜Y’$I’$I’*€ ¸€øˆË¹œŸñ3ºÑRÈ!€"ø¿âΡ! 
N›_vv6+W®dÁ‚,Y²„oÛ} #aÖ=³8§Ë9Üÿýœ}öÙœyæ™yÿ€{ÐÎ;ùðÃY°` ,`Ê”)ìß¿ŸV­ZѳgOzöìI=¨_¿~@³ûŸÜÜ\~õ«_å{,''‡ŒŒ ÆÏĉ7n\@é$I’$U&ÍhÆÓ<Í-ÜÂxÆs—ÑžöÜÍÝô¥¯ÅI’$IR¹`!D’$I’$I’hF3’Iæ2.ã^É÷Ü~öóßq×1‡9%üÞáÅ‹³cÇ5jÄ9çœÃïNþíÓÛsΆs @W»ví¼â,ˆL:µÜDž{î9-ZTàÓ³³³HKK;î™$I’$Un§q3™ÉíÜνÜKúó#~ÄÜÁ%\BAG”$I’$À,„H’$I’$IÒ!6²‘, ‚ö³?ßsYdñ&o2‹Y\Ê¥Ç-SI &¹Hi•ׂÈÖ­[=zt‘ÏgggóÄOо}{†zL³H’$I:ñü˜ó:¯ó)Ÿò0sWqw0Žq\Ã5DtDI’$IÒ ÈBˆ$I’$I’$M6—p »Ù]  rPˆ×s=]éJ,±Ç&Çq,€”¤¼DFÍ®]» ¬r¸n¸¶mÛrÖYg•éñ%I’$ à ÎàyžçNîäwüŽ‘ŒäAänáZ®¥&5ƒŽ(IR™*éý8I’, !’$I’$I’tÀ]ÜÅr–YÈ%—ÝìfØÜ29ny*€”$ˆ‚È{ï½ÇôéÓKüÇ稨(²²²xñÅ-„H’$I:¦~À˜Æ4îå^åQ~ÃoÏxnànäÆcö’$I’$ÊBˆ$I’$I’$ð±ŸýDE6ÙäRx!‹,ÞäM^äE®äÊR§"@Jr¬ "{÷îeذaDDD““Sàù¨¨(²³³©Q£ýû÷gàÀôîÝ»Lç(I’$IEiIKžà &0'y’'x‚Gy”A b4£ù!? :¢$I’$©³"I’$I’$Iüƒð5_ó&o2ƒ,g9DKnUCB„ÎpºÒ•xâ‹Ýoe*€”¤¬ "<ð©©©ùÊ ‘‘‘äææIbb"C† ¡_¿~T­Zõ¸ÌQ’$I’×€ÜÃ=Œc³˜ÅC<ÄT¦Ò™ÎŒbq‘DS’$I’TÉX‘$I’$I’¤C´¢£Ü6³™yÌc&3y›·É!‡"È!‡\rÙË^®ã:Þâ­|û8‘ %9š‚ÈçŸÎï~÷;rrr…BDDD››K=g S¸—{¹‡{Ä ®çz~̃Ž)I’$Iªà,„H’$I’$IRêQAn;³wòçÿþ™÷¼È'­>áúÕדÓ1ÇÈ(® òì³Ï …ÈÍÍeË–-|òÉ'4lØ0oI’$IªNã4žà à¦1'y’§xŠŽtd(C¹œË]5D’$I’tD,„H’$I’$IR ²³³Y¹re¡+€\ØíB:të@â¿-€”C ">ø`±+ˆgAD’$IREp'1êÀ-…’Hâ6nãn¡}Æ0~Á/áÏ•’$I’¤ðX‘$I’$I’¤ÃWq㫸D,ˆH’$Iª¨:ЧyšGx„™Ìäyž'‘DNåT®à †2”¦4 :¦$I’$©œ³"I’$I’$é„g¤â° "I’$©2©C†¸­bÏóã3¦3§xŠ{¸‡Ó9K¸„A âN :ª$I’$)B$I’$I’$Ux@. 
"’$I’*ªxy€XÆ2f3›§xŠ L Ä 28₎*I’$I:N,„H’$I’$Iªp,€¨¬X‘$I’TÑDÁÙnðÿàÌd&ws7·r+=èÁe\F?úQv‘$I’¤ÊÌBˆ$I’$I’¤rÏˆŽ "’$I’*’ªTå‚·L2y›·™Ílnæf†1Œv´ã.àR.å‡ü0踒$I’¤2f!D’$I’$IR¹cDå…I’$IE4Ñô9p{‚'x‹·˜Ã¦0… L ú¸ý„ŸADБU†233IOOgÆ ¤§§³~ýú¼{€&Mš›w߸qcbcc‰ŽŽ8¹$I’¤£a!D’$I’$IRà,€¨¢° "I’$©"¨Mm.9pË!‡÷yŸ¹Ìå5^ãwüŽ4 7½éCzÓ›ZÔ:âc}ÅW4¤!u¨S†3ÐA{öìÉ+wlذ7æûuð±mÛ¶åÛ®aÆ4jÔˆøøx>úè#6mÚÄ7ß|“o\½zõˆ‹‹£qãÆÄÅÅåûu°4Ò¤IªW¯~Üæ,I’$)|B$I’$I’$w@TYX‘$I’TÞEÉÙnò ÿæßÌaoð/ðթιœKúðK~Iq¥Úg:0•©ô¡Ï±˜B¥´gÏ222ò•: »OOO'777o»˜˜˜|Ž3Ï<3ßï7nL||8¼èáªGç`q¤$Å•tG_å`I§¤UGÊëj,’$IÒ‘òÿn%I’$I’$•Ȉt|X‘$I’TEÁYnwr'»ØÅK¼Ä0†¹MÖÛhF3“™Lc?àG¥¤¢ÇÆIKK#+++o›èèè|E„„„B qqq¾¯QT¯^V­ZѪU«bÇ•´ºKJJ 6l`ûöíù¶‹‰‰)vµ‹#’$IªHü¿VI’$I’$IT¶È¡sssLR:s—Uæ²Þ_Y; 𧬳¾ûî»L™2…äädrrr8õÔS¹ñƹúê«ËõkØ‚ˆ$I’¤ò¨µ¨JU"ˆ`?û‹»Ÿý¤B[Úr÷q·Idqá=Ö®]KvöÿV$‰ŽŽÎw‡ò~øJª|ޤ8’‘‘Q൵jÕ*6lØ@zzz¾÷!Â)Ž4mÚ”¨¨¨c=UI’$©HB$I’$I’$UºÈárss‹,”gs—×ýËc—uÖ_üâ´mÛ–ùóçsÆgðå—_2räHÒÒÒ¸ûî»Ëì8ÇšI’$IåÅ"Id‰…ø~µ€ßðOœ þx;þ³#ï¢üÿþ÷¿ääää/®èqð¾I“&Ô­[÷˜ÍO•G¸Å‘ÌÌL¶mÛVd))ÜâÈáe¤¸¸8š7oN­ZµŽõT%I’t²"I’$I’$€*{D*ÌôéÓiÛ¶-mÛ¶eÚ´itîܹBBgAD’$IRöîÝËü*óÉŠÌÊ÷x(7D(çûdÈÌ…ˆCžÜ¡­!26eðÑWq §ZôhÚ´)uêÔ9¾’ø_é` ©({÷îeëÖ­%G6mÚÄþýû 쿸UG|ýK’$©´,„H’$I’$I' íØ±ƒñãÇóÆo°aÃbbb8÷Üs1b?ùÉOŠÜîàùY»v-7Þx#ï¾û.5jÔ 11‘É“'¸ÐþÓO?eìØ±,^¼€.]ºððÃsÆgä÷ÙgŸ1fÌ-ZDdd$=zô`òäÉ…føôÓO7n‹- sçÎ<úè£öY”´´´°³‡sœ9sæðÐC‘’’Bll,ýû÷ç¾ûî£víÚÅæ8x.}Í :”©S§†uÍš5´lÙ2oü—_~Illl¾c§¦¦º:LýúõÙ»wo±+ "’$I’ŽFI+$¼Oß’Nî®\ˆö@Ä7TÛ^“¶žDLV õ3ëÓ$² -ªµàÔZ§rzÌéü(þGÔŠ«qÀÌ g*¹jÕª…UÙ·o[¶l)ðç'##ƒ7’’’ÂܹsK\!§°{WÈ‘$IÒAB$I’$I’¤JÈHÉ®¾újÚ·oÏûï¿OݺuYµj7Ýt?ýéO -”››K(âºë®ã–[náÅ_$##ƒQ£FqÛm·ñ—¿ü%oìW_}EïÞ½™0aÓ¦M¾/Oüò—¿dáÂ…´nÝ€Õ«WÓ»woÆÏ´iÓˆˆˆà­·Þâ²Ë.+pü¯¾úŠÄÄDî½÷Þ¼±óæÍ£_¿~$''_âÜÏ~ë­·š=Üãôë×§Ÿ~š·ß~›o¿ý–qãÆqÓM7å͹¤sYÜù.)k‹-øä“O:t(ÿüç?ó¶Û¿?mÚ´á¯ý+-Z´(tß'N¤sçÎ%ž¯ŠÌ‚ˆ$I’$€={öyQúá…CÅÄÄä»=!!!ï÷5>ªA‹F-h߆¨æQÐ< ÉIåTÕªUÃ*ŽdddYÂ:XY»v-ÙÙÙyÛV‰‰‰)´D"I’¤Ê+”[Ü¿´I’ b³ÈÀ £¨…BÌš5‹}=IÒ¡’’’6lXÐ1$IªPJ*€tîÜù„/€^<¨]»6ÿýï©W¯^Þc©©©´jժ؂ÂÁ}½þúëôë×/ï±/¾ø‚=z°~ýú¼Ç®ºê*Î:ë,F•oûI“&±bÅ ¦OŸÀ AƒèرcqÓ¦Mãšk®É—窫®¢mÛ¶Œ;6ߨgŸ}–•+W¹ªHqÙSSS9ûì³ d?Òãdddкuk¶nÝZàØ‡ŸÛâ !áf8óÌ3ùË_þÂücæÍ›Ç£>Ê;3oÜÚµkINNfÊ”)¤¦¦òÁpÊ)§9—Êîð‚ÈŠ+,ˆH*S)))%^'Iåß¿TQVô8ü~Æ lß¾=ßv‡= 
»oÖ¬UªøY³RyR\qäà}ZZYYYyÛDGGY9ô>..î„}ßP’$éHÏ÷ŠºÞÔBˆ$•’…•% !’T8 !’$•ÌHé^<èÕ«»wïæ®»î¢gÏž¥ºÈ% ±mÛ6bbbòÛ»w/Õ«WgÿþýyÅÆÆòá‡Ò¼yþIMMM¥S§NyŸ¼ZÔ¸ôôtâââòå.jì¦M›èÞ½;«V­*uöœœ¢¢¢ÂÊ^šã„Sþ(©NV€É“'óÅ_ðä“Opþùç3bÄÎ?ÿüûlÞ¼9 `̘1ÄÆÆ;IeÍ ª%UT~ÿRÐÂ)z¬[·Žo¿ý6o›ªU«R¿~ýW hÞ¼9‘‘‘ÎNÒ±VTqäЕ֮]Ë®]»ò¶©V­õêÕ+±8KDDD€³“$I*?ÊC!Ä¿$I’$I’T”T™0a‚Rš={6&L`äÈ‘lÞ¼™víÚ1`ÀFŒATTT‰ÛZR€ïÿÑüðbÖ-[ -ÄÅűeË–ÇöØÖ­[iÑ¢E¡™ªW¯^bn(˜=22²@öp³yófn¿ývÞzë-ÒÓÓÉÉÉ +C¸ÂÉ pÅWðÃþG}”ôôtRSS9ï¼ó Ýçš5kÊ4ceR»ví¼â,ˆL:Õ‚ˆ$I’t^¤}èEهߧ¥¥±sçμm¿H»U«VtîܹÀEÚ5²è! øþý”˜˜ŠW\ùì믿fÉ’%%–Ï¿?XF³|&I’t|X‘$I’$I’Ê! ÇÞI'ĤI“˜4i[¶láwÞáñÇgñâÅüõ¯-“c4hЀ7(Vlܸ‘ ä—žž^`5Ž­[·ºÏU«V(J”µp3hÐ ~ðƒ°dÉâããóVZ9ޯ˓O>™Î;3sæLþïÿþ›o¾Ù?eÀ‚ˆ$I’ž¢>ÿÐû´´4²²²ò¶‰ŽŽÎ·ŠGQE?_Ò±R½zuZµjE«V­ŠNqdýúõìØ±#ßv111Å®6Ò¸qcš5kVª•{%I’”Ÿÿ'%I’$I’$•@Ž¿P(DZZñññ4hЀK/½”ÄÄÄ"WÅ8={öäÕW_eôèÑùíµ×ò.°èÕ«¯¿þ:£FÊ7îwÞ)°ÏsÏ=—… Ò¿ÿ|/^¼˜Q£FñÑG•Iöp³lÙ2fΜIݺuóÆìÞ½;ìã”åëùꫯæþûïgçÎ|üñÇ…Ž)lu…Ï‚ˆ$I’N4eQôHHH gÏž.„Ž‹‹;n?ã~œèèhêׯOûöí0`W\qU«V=.YŽD(*ô繃ó*?ë¯,E‹c}L(çùh9cyìwß}—)S¦œœLNN§žzßeeÿ IDAT*7Þx#W_}u™})‹âÈÆIIIÉ[!éPáGš6mÖª¾’$I' !’$I’$IR,€”×]w¿ÿýïiݺ5Û·ogòäÉùŠGëÞ{ï¥{÷îÔ©S‡¾}û …˜3g“&MbáÂ…yãÆO·nݨ]»6}úô!22’ ðÀØçøñãéÛ·/999tïÞªU«’œœÌðáÃyâ‰'Ê,{¸ÇéÔ©cÇŽåŽ;î qãÆ¬]»–‰'†}œ&MšðþûïÓ±cGÞ{ï=†JZZÚe¾à‚ øÕ¯~ÅðáéQ£Fç;wîL(bÉ’%G´dAD’$IѾ}ûزeKÞEÉE]¸¼víZ²³³ó¶‹ŽŽÎwqr‡ ½hùX¯èx$rssó öìÙCzz:ÿüç?yæ™gøýïÏ«¯¾Ê©§žpÒÒ98¯ò >ž“N:)ÀÙ•­êÕ«Ó²eKZ¶lÉ¥—^JRR½zõâã?®Tó”NÓ§O§mÛ¶´mÛ–iӦѹsçã^ ×Á祤ïë‹#›6mbÿþýö_\y¤Y³fÔ®]ûxLW’$é˜ åVôµü$é8 b³ÈÀ £¨…BÌš5‹}=IÒ¡’’’6lXÐ1$I:*%@:wîlD*c/¿ü2sçÎåùçŸ:ŠŠpxAdÅŠD¤J.%% Ø‹¼$©<òûWùRÒ'ɼ÷‚àâV¬?Ôõ×_OÆ ó­úøé§Ÿ2vìX/^ |ÿ!?ü0gœqF¾m?ýôSÆÇ¢E‹€ïWh|ôÑGóÛ±cãÇç7Þ`Æ ÄÄÄpî¹ç2bÄ~ò“Ÿ›ûpC‡eêÔ©yϯ]»–o¼‘wß}—5j˜˜ÈäÉ“ ülN΢¬\¹’1cưtéR"##éܹ3¿þõ¯9÷Üsóe-ìC ?ï…=Îù)é\„3¿ƒûøê«¯3f ï½÷Û·o/4çá™Ã=ÏsæÌᡇ"%%…ØØXú÷ïÏ}÷Ý—ïÏÚ‘¾ÂÝqó8|žá¾.Â=îgŸ}Ƙ1cX´h‘‘‘ôèуɓ'Ó¼yó#:ö‘|ͶoßNË–-ÉÈÈ(ñœTeU,ì¾²%IRÙ:žïu½©…I*% !*KB$©pB$I‘)8¡Pˆ?ün¸©S§Ò¾}û #)LD¤ÊÏ ª%UT~ÿ:>Â-z¤§§ç»è9&&¦Ø‹w7nL||ýôÓ\uÕU|ûí·Œ7ŽP(Ä´iÓJ=ߢ²”´ÿâ¶=tÿ¥=o%wõêÕtïÞñãÇsÁÁ[o½Å“O>Éûï¿TÇ.Í×ìÖ[oå‹/¾`îܹ%ž“MFFF±çlܸ‘µk×’·M8Å‘ÆàÌ$IR,„HRd!DeÉBˆ$ÎBˆ$©"°"•¡Pˆ¨¨(FŽÉc=t "RåãÕ’**¿={ö”XòÈÈÈ`ãÆù¶ §èÑ´iS¢¢¢šYÅVR!ä»ï¾£Q£FìÚµ €«®ºŠ³Î:‹Q£Få7iÒ$V¬XÁôéÓóƵmÛ–±cÇæ÷ì³Ï²råJ&Ož 
@íÚµùïÿK½zõòƤ¦¦ÒªU«£.„¼þúëôë×/ß~Ï>ûlÖ¯_Ÿ÷X¸9 sÅWлwo ”÷Ø_|Ái§Vf…pÏOQû+ÍüB¡ï½÷ݺu+rÎ…å ç<&##ƒÖ­[³uëÖRÏ7…í¿¸yºÿ£y]vÜAƒѱcÇn¦M›Æ5×\sÄÇçk¶víZ’““™2e ©©©|ðÁœrÊ)EŽWñÂ)ޤ¥¥‘•••·Mtt4111%–Gâââ|ÏX’¤JÂBˆ$U@BT–,„HRá,„H’Ê# ’tüY‘*>/¨–TQùý«pá=Ö¯_ÏŽ;òmNÑ£Y³fT©R% ™J*„ìÚµ‹ØØØ¼BHll,~ø!Í›7Ï7.55•N:åzŠ·iÓ&ºwïΪU«èÕ«»wïæ®»î¢gÏž¥úz—TÙ¶m[¾OæÏÉÉ!**Šýû÷ç=nÎÂÄÆÆòÑGѸqãRç ·îù)j¥™_(â»ï¾£FÅÎçðã†sž‹ÛþHæ[š|á\wø¸£y]”féééÄÅÅñ±Ãùš…B!š7o΀3f ±±±ÅfWÙ§8²nÝ:öíÛ—·M¸Å‘ØØX"""œ$I*Iy(„ø“´$I’$I’TŒ5kÖpýõ׳téR¾ûî;7nL·nÝxä‘GèÚµ+mÚ´ :¢$UZµk×Î+~lÛ¶E‹±páB.\ÈÔ©SøÑ~ÄwÞÉ%—\d\I’¤JåÛo¿å†n`Íš5lذôôt233óž¯V­±±±4iÒ„FqÚi§ÑµkWâããiÔ¨ñññ4lØF8 •ƪU«ò­&°eË–B/(‹‹cË–-y¿ßºu+-Z´(tŸÕ«WÏûïÙ³g3aÂFŽÉæÍ›i×® `ĈG½êË¡%€ÈÈÈå€psfË–-4hÐà¨2–ähÏOiçWš2ÈAáœçÍ›7sûí·óÖ[o‘žžNNNN¡û:Òù†»ÿp…{ÞÂ=nQn {ìX|ÍÖ¬YSâ•­˜˜bbbHHH(vܦM›øæ›oX·n›6mÊ»_¿~=Ÿ~ú)óçÏ'==½{÷æmMll,7¦E‹<õÔSÔ©SçXOI’$U0ÖG%I’$I’$) ‰‰‰AG$I’$ €§Ÿ~š /¼0ï÷ 4È[äP7nÌWŽhРÛ¶m#77·À¯Ý»wç;餓˜4i«W¯æë¯¿fäȑ̜9“Ë/¿üØN¬”9 S¿~ý|%˜Ò…Bù.ð†ïËî‡;Úós4ó+Kƒ ¢fÍš,Y²„ÌÌ̼ ‡;Òù†»ÿp…{ÞÂ=nƒ HOO/ðøÖ­[øØ’$IRq\!D’$I’$I*F‹-˜?>ÙÙÙ¬\¹’ °dÉÆŒÃŽ;hÔ¨çœs;wæì³ÏæÌ3Ï$ û„’͈#HJJ :J©íß¿?°cþ:­^½:Í›7çì³Ïf̘1Çlõ›P(tTjoÆ ãÉ'Ÿ¤JßNÂÎ;ùðÃY°` ,`ÅŠìß¿ŸV­ZѳgOî¼óNzôèAýúõƒŽ*I’TéÔ©S‡_|1ßc{öìaãÆlذ¡ÀýçŸλï¾ËúõëÙ±cG¾íbbbˆ‹‹£qãÆEÞ7kÖÌÿïÐSO=Å;ï¼ÃÊ•+óëÙ³'¯¾ú*£GÎ7öµ×^Ë[ÅàÜsÏeáÂ…ôïß?߸ŋ3jÔ(>úè#àûŸÓÒÒˆ§Aƒ\zé¥$&&¹B¡Êâ½–ps¦[·n,X°€Áƒç=¶råJ.¿ürV­ZUìqcccY»v-§žzjÞc‹-*0.ÜóSÔ¹8šù•¥eË–1sæLêÖ­›÷Xaå†#}=„»ÿp…{ÞÂ=n¯^½xýõ×5jT¾Çßyç#>v¸*Òû-•IFFF¡/z¿nÝ:öíÛ—·Mtt4111yžqÆ$&&øû166–ˆ?ó[’$ÏŸ¤%I’$I’¤0T©R…:СCÆW  rï½÷Z ÈØ±c9çœs‚ŽqD »àxÉÍÍÍ+gäææ²k×.¾úê+æÌ™ÃÏ~ö3fΜI¯^½ËW^téÒ…ßüæ7<úè£AG9!”T7nœI’¤U¯^V­ZѪU«bÇWÙ¸q#)))dddX}"œâHÓ¦M‰ŠŠ:–Ó}ˆŒŒdÁ‚<ðÀG|ìptîÜ™P(Ä’%KJT¨pŠiiideeåmsxÑ#!!ž={øû,..Î÷Œ%IR™ åZ –¤R b³ÈÀ £¨…BÌš5‹}=IÒ¡’’’6lXÐ1$I*•à "‹/¶ r¤¦¦2dÈ’““ƒŽR!µZÇܹs¹ãŽ;øä“OŽÛ1˳®]»2}útš5kt”J§¤HÏž=-€H\JJ :t8‰$•Žß¿ŽÌÌL¶mÛVìŶ6l ===ßÏáGâãã©Zµj€³+_/¢ZµjÔ¯_ŸöíÛ3`À®¸â ªU«V`»ÿûߌ;–ŋߗæ~øa~ô£å÷å—_rûí·³`Á²³³9ýôÿgïÎë¨ý¿„U‚I@ÀÖ[l’ª°ÙÄ¥—Û‚öVQomT¢^1¸°µT„¢µ¢uoËb­H•*ÇŸ ‰`%.­`í#¨¬aÙIB~„ IH€$“åýš'Ï1sf2ŸïI 0g>óý6wÞyg‰™Þzë-}ôQ-ZÄÎ;騱#—]v&L U«VGÍ?{öl233Y»v-;wæá‡—TŠþ3RÞúÊä,ÏâÅ‹3f 
999œrÊ)Œ1‚±cÇVxÌÍ›7“‘‘ÁüùóÙ³gçw=öXøß˜ÅÛVöõ)ïµ¨ìøŽüY¨Ì¿ÑåuÞ¸q#·ß~;o¼ñÛ·oçôÓOçÞ{ïåŠ+®8®ñ©²_ÿXÆQ™×íXŽ»lÙ2ÆŒÃ;ï¼CDDýúõã‘G¡gÏžÇuìÊ|ÏúöíKdd$ÙÙÙåŽ_E*SôXµjùùùá}¢££úgNñcLLL€#“$IA¨Éóå]oj!D’Ž‘…U% !’T6 !’¤úÀ‚H͘ê멊Y‘/¨–TWùû«v©lqdÆ nºé&ÆÏÿøG,XÀ~ðƒÏ—·_Y뮿þzn½õVþøÇ?²mÛ6222¸ýöÛyæ™gNhÛã=ÖŠ+¸è¢‹?~<Ï>û,5bþüù•º#fEÞzë-¾õ­oE¥‹‹.ºˆ &ðì³Ïðꫯrá…²`ÁºwïÀ5×\CïÞ½Y¼x1mÚ´aùòåŒ5ŠsÎ9'œ¥x|ee;–ñTôýýüóÏIKKcܸq<ûì³DFF2oÞ<.½ôR.\H§N* ))‰_üâBŽCEÌÌL ’$IªPñ»ñññG½È¦¢ ‚³³³½ X’êÊ=**&''[”$Iõ–3„HÒ1r†U%g‘¤²9Cˆ$©!p‘ªÑ£G/^L»víJ=ÁÛo¿Í AƒÊÜ·¼bBYë#""xå•W¸ôÒKÃë>ûì3Î;ï<Ö®]{BÛï±®¾új’’’ÈÈÈ(±ÿŒ3øÉO~R©BHññ Ù½{7Ÿþ9sæÌá·¿ý-3fÌ --«®ºŠï}ï{¥Ž3yòd>üðC^xáZµjÅW_}EÛ¶mÃÛ|ñÅ$$$”š¥¬lÇ:ž£}¯ºê*¾ûÝïrÇw”XÿôÓOóñÇ3uêÔcʼiÓ&RRRøì³ÏÊ|õ g‘t$ï°/©®ò÷WývàÀ6oÞ̶mÛŽz‘ñªU«ÈÏÏïW™âH||<111ŽN’j¿ÊÎü”››[âMLLÌQÇÄÄеkWZ´hàè$IRCRf±"IÇÈBˆª’…I*›…IRCdAäø´mÛ– 6иqãRÏEDD°{÷nš7o^æ¾ÇZÙºuk‰‹zöïßO³fÍJÜ}ðx¶=ÞcÅÆÆòÞ{ïÑ¥K—ûoÙ²…víÚUºR¬Y³ftîÜ™””î¸ãN?ýô£ç‹/¾ _¿~¬_¿€óÏ?Ÿ={öpÏ=÷ššJTTÙT—÷ºëxŽöý-ïkmذÁƒ³|ùòcÊ|àÀâããÙ¼ys™Ï7d@$UÄ ª%ÕUþþR±mÛ¶õbåõë׳zõjòòòÂûDGGSay$..ÎãKªWöîÝ[aÉ£¸Œw¸ŠŠñññœzê©ež”$I Rm(„”ýî–$I’$I’¤Ebb"‰‰‰dff–*ˆŒ7΂HvîÜYîEü@¹eãqä^›6mZnéâX¶=Þý7oÞLlll©}õÂûŠr•wœ¸¸¸‰Ù³g3aÂn¾ùf6mÚD¯^½øÑ~Ĉ#*õfýñŒ§¼ïï–-[èÚµk™Ï5kÖì˜3GEE±cÇŽ ÇÐTTÉÌÌ´"I’¤z%&&†˜˜zöìyÔíŽVY¶l¡P踋#±±±DFFV÷P%©\•)z¬]»¶Ôù“#‹‰‰‰¥~ÏuîÜù¨ç÷$I’ttþMJ’$I’$Iª…,ˆTN«V­ÈÏÏ?®»FDD°ÿ~š6m^·uëÖªŒW­ÚµkGnnn©Y0rss«ü8ëׯ/U°X¿~=íÚµ ~ÒI'1yòd&OžÌæÍ›yë­·˜2e ‹-⥗^ªÔqªj<íÚµcùò奊5Gªlæüü|N:é¤cÎQX‘$I’*çX‹#ÅwÈ?übê•+W’••ÅêÕ«Ù¹sgxŸ¦M›Ò¶mÛ ‹#:t Q£FÕ=TIõHeŠkÖ¬á믿ïÓ¤IN>ùäðïž„„’““ßݺtéâï$I’¤`!D’$I’$Iª,ˆ”íä“OfÇŽ%Š •˪U«èÑ£GxÝ;ï¼S•ñªÕùçŸÏ+¯¼BFFF‰õo¼ñF•'55•—_~™Ûn»­Äú¿þõ¯¤¦¦†?ˆˆ`õêÕtêÔ‰víÚqÅW––VªHRÞÏcUŽç‚ .`Á‚\~ùå%Ö/Z´ˆŒŒ >øàƒcʼcÇŽSx°"I’$U¯ââHEŽv‘vqq¤¢‹´Ë›}Ä‹´¥ú¯¼Y‹/£­ZµŠ]»v…÷9²|vdÑÃY‹$I’j' !’$I’$IRdA¤È駟Κ5kŽ«’––Æ]wÝÅ#Y¥Ç7nƒ¦uëÖ\rÉ%DDDðꫯ2yòd,XPbÛ믿ž‡~˜îÝ»³}ûv¦NZ¢4бcG/^LRRo¿ý6?ûÙÏX½zu•Žgüøñ\rÉ%0xð`š4iÂÂ… ¹ñÆyì±ÇŽ9óš5k8ýôÓ9G]`D’$Iªš5kFBB GÝ®¢»ûçää°nÝ:¶oß^b¿˜˜˜£Î6OçΉŠòÒ"©6)¯èqøãêÕ«ÉËË ï]¢ V^Ñ#..®^7”$Ij(üW›$I’$I’T4Ô‚È÷¿ÿ}rrrèÕ«W‰õÅc*~,,,,µïÃ?LFF}úôaÏž=œwÞy<ùä“tîÜ™ˆˆˆð>‡­Ã¿NYëOtÛcÙ?!!yóæ1fÌFMdd$ä¹çž«Ty¡¼c©GÌ›7;[n¹€þýû3oÞ<ºwïÞ. 
ñè£2pà@vîÜIÇŽ¹ì²Ëxî¹çJ|½É“'óÓŸþ”µk×Ò¹sçpAãXÆSÑ÷·k×®üå/áÎ;ïäúë¯'??ŸoûÛL›6K/½ô˜3ðÁ¤¥¥UøšÖ@$I’¤úåDŠ#‡Ï°|ùrÖ­[Gnnn‰cU¦8rê©§Ò¸qãêªT¯U¦è±jÕ*òóóÃûDGG—ø111±Ü™$I’TEí>IR)D0“™ cXÐQTDDD0sæL† óçI’7}útÒÓÓƒŽ!IR½rdAdÑ¢Eõ¢ òÅ_pÝuוš©¢![¶l_|1_~ùeÐQªDmÏ Aƒxþùçéܹs`ŽWEÔÔT ’ªUNN‰‰‰'‘¤cãï/5TûöícëÖ­G½(½2Å‘².JïÒ¥ -[¶ ptRÍ:pà›7o>jkýúõ|õÕW„÷;²èQÖcÇŽiÓ¦M€£“$IÔìùƒò®7u†I’$I’$©¨¯3ˆœvÚiôéÓ‡_|‘«®º*è85.""‚)S¦0|øpš7oΧŸ~ÊÈ‘#¹ù曃Žv\jãx^|ñE’’’êLÄ@$I’$ˆâ Ñ‹g(ÏþýûÙ²eK¹…‘âG6lØÀÁƒK}ý£]è~ê©§Òºuëš®t\*úù/~¬èç?11ÑŸI’$0 !’$I’$IRTŸ "“&M⦛nj…¿ýío<òÈ#ÜsÏ=DFFÒ£GFÅ5×\t´ãRÇóÎ;ïð»ßý.°ãWĈ$I’¤ 4mÚ´RÅ‘ŠfHÈÉÉáoû›3$¨V¨ªrœ!G’$I5&¢ðð¿J’*A3™É0†U¼±Tò¦ð’¤†núô餧§C’¤íȂȢE‹êLADªÏ**€¤¦¦Z‘T«äääõBQIªüý%Õ¼mÛ¶õ"üõë׳jÕ*òóóÃû­8Sb®½{÷–*#OÑ£¬ÇSO=•Æ8:I’$©&Ï”w½©3„H’$I’$I*¥>Í "ÕeÎ"I’$©¡ˆ‰‰!&&†ž={u»£GŠgY½z5yyyá}¢££KDÊzŒ‹‹ó¼FrxÑ£¼ÇuëÖ±=c; ²‹>Ú|܆ø–ñáï{Ïž=Kýêøÿ¼íŸ|•ù{ÙK;Ú‘L2ƒ̹œKSš8bI’$éøY‘$I’$I’tÌ,ˆHUÈ$I’$UÊGŽV>(.ެY³†¯¿þ:¼Ï‘åƒ#‹g#éÒ¥ 5ªî¡ÖÅ%mÛ¶•ûš¯^½š;w†÷9²¤“@rrr©×¼C‡•z­sÉe‹b6³™À¢ˆâ,Î"õÐÒŸþD$I’TgX‘$I’$I’tÂ,ˆH•cD’$I’j—fÍš‘@BBÂQ·«LqäÈY+ ¨˜r´ÙFâããéܹ3QQu÷2®òfc9üqõêÕäåå…÷‰ŽŽgŽVô¨êÙXb‰eè¡`=ëÉ"‹!f1‹‰L¤9ÍéMoRH!•T0€&4©² ’$IRUª»ÿ’$I’$I’TkY‘ŠX‘$I’¤ú¡*Š#ëׯ'''' 77—-[¶”د}ûötèÐN:°fÍ6lØÀÆKlwòÉ'.u¤¤¤”šÉ#>>žfÍšUý‹P$Z†3(*ˆ„‘EÓ˜ÆXÆÒ’–œË¹¤ZzÓ›H"N.I’¤†ÂBˆ$I’$I’¤ÀYQ]aD’$I’T“š5kF·nÝèÖ­ÛQ·Û·o¹¹¹áYEÖ®]ˆ 8óÍ7øä’KèС;v @bcc‰ŽŽ®‰aÔ $~ho "!B<ȃŒe,­hÅ9œcAD’$I5ÂBˆ$I’$I’¤ZÇ‚ˆj ’$I’¤º ::š®]»Òµk×’Oüç?E¿úUgjŽV™Ä$Æ2–S8…s8‡RH%•>ô!Ï_I’$©jX‘$I’$I’TëYQM±"I’$I’Ž×ᑃäS>%›lB„˜ÈDÆ2–ö´g I&™R,ˆH’$é„X‘$I’$I’TçTTùûÝwsp÷n.¶ ¢ X‘$I’$IÕ!’HzZÒI§€>â#²È"›lÆ3žíl'–XúÓŸTRI&™žô :º$I’ê !’$I’$I’ê¼pAdÛ62ç̓ݻÙÕ§—\Â[K—:ƒˆÂ,€H’$I’¤ 4¢‰‡– 2‘!²Èb cøš¯‰#ŽRH%•4Ò8Ó‚Ž.I’¤ZÌBˆ$I’$I’¤º/‚{ï…%K 9æÏ§ej*··@©D,ˆ4@$I’$IRmtxA$“LòÉçc>&th¹…[ØËÞ‘ ¸€.t :º$I’j !’$I’$I’ê¦ÂB˜3î»>ú.¾¸¨rÎ9¥6 Ï ’˜Hff¦‘z̈$I’$Iª‹¢ˆ:jA$ƒ ö±H&™R¸ éLç £K’$)@B$I’$I’$Õ=¯½¿ø|ø!\~9üáлw¥w· RX‘$I’$IõÑ‘‘½ì%‡²É&DˆÑŒf?ûI TRI&™ó8Nt :º$I’j…I’$I’$IuGVÜs,\©©°t)ôésÂ_Ö‚HÝaD’$I’$5DÍhFÊ¡%“Lö°‡wy—,²È&›gy–DRIe0ƒiG» £K’$©Y‘$I’$I’Tû½ûnÑŒ o½UTùÇ?à{ß«¶ÃY©=,€H’$I’$•ÖœæáâÀnv³˜Å„‘EÏð yä•(ˆ|ŸïÓ–¶'—$IRU²"I’$I’$©öúðC¸óNxã <¸h†ääaA¤æX‘$I’$I:v-hQ¢ ²‹],a ¡CËS<ÀœA )ámcˆ 2¶$I’N…I’$I’$IµÏÊ•pï½0cFÑL 
o½çtª0 "ULj$I’$IRÕkIË‘ìä=Þ+Q‰ ‚^ô"™dRH!4ÚÐ&àä’$I:B$I’$I’$Õ[¶ÀƒÂԩйsQ!äÇ?†Z^¢° Ry@$I’$I’j^+Z•(ˆlf3‹YL6Ù„1iDI/z‘J*É$3€œÄI'—$IÒÑX‘$I’$I’¼Ý»áÑGá7¿&M`üx¸õÖ¢ÿ®ƒ,ˆ|È$I’$IRíÓŽv 9´ld# YHY„1‰I4¢gqV¸H’L2ÍhprI’$ÎBˆ$I’$I’¤à<Ï<÷Ü{öÀwÀ-·@‹A'«R © bD’$I’$©îiO{†Z6°wx'\™ÈD¢ˆ*QI!…h¢N.I’Ô°Y‘$I’$I’Œ Šfùä¸ñF7Úµ :U¨O‘R~ÈÁ¾IXcD’$I’$©®ê@‡‘\rYÄ"B„˜ËÜ2 "ýéOSšœ\’$©a±"I’$I’$©f­ZU4#È /@j*|ð|ç;A§ T]*ˆT4È iƒ˜Òw /ñ½é]£Ù$I’$I’T=b‰-QYÇ:²É&Dˆ™Ìd"iNszÓ›RH%•   MN.I’T¿Y‘$I’$I’T3v‰¡K˜;~øÃ SÕJµ© RQäÈ@ )d)K¹XÂ"‰¬Ò<’$I’$I ^<ñåDf0ƒ‰L¤-èK_’I&…2Æ48¹$IRýb!D’$I’$IRõ*,„gŸ…±c¡  ¨rãåéÉʪɂȱ@ŽAò(}èÃÓ<Íõ\_/$I’$I’j±# "+YIYd“ÍÓ<Í&Ð’–œË¹¤’J2ÉœÃ9D$I’Nï¸J’$I’$Iª>~7ß ï¿#FÀøñtª:¯* "'Z)Ë™œÉÍÜL&™\Æe´£]U¿’$I’$IªÅ-ÃDB„È"‹iLc,cKDRI¥7½mV’$éY‘$I’$I’Tõ¶o‡qãà±Ç wox÷]øÞ÷‚NUoYÉËËãÿø .dÁ‚Ü}÷ÝìÞ½›øøx ĹçžËŠ+X°`ÿú׿øîw¿ËÀ¹÷Þ{éß¿?mÛ¶=¡L÷q³™Í]ÜÅt¦WÅ0%I’$I’TG%@ú¡¾)ˆ„ñ 2–±´¢çpN¸ Ò‡>Dpl³ßJ’$54B$I’$I’$UÂBxá¸ãÈ˃‡†Q£ Ò;ûդƓœœLrr2wÝuW™‘nݺ1hÐ &L˜P%#µ¢ñ?å§\Çuô¥o•~}I’$I’$Õ]G+ˆLbcË)œÂ9œC )D$I’Êa!D’$I’$IRÕøç?ᦛà½÷ŠùKhÓ&èT¢tA¤¦\É•<ÅSŒd$ÿà4¢Q[’$I’$IuÇá‘ ø7ÿ&›lB„˜ÈDÆ2–t`H&™R,ˆH’$Þ–O’$I’$IÒ‰Ù·î¾’’Šfyÿ}˜6Í2ˆxœÇYÆ2çñ £H’$I’$©hD#zÒ“tÒ™Å,6±‰¥,åNî`ãH"‰xâÆ0¦3e, 8µ$IR0œ!D’$I’$IÒñËÊ‚ôtøê«¢An¿9 „¾ÑƒÜÊ­ÜÍÝüˆG\Б$I’$I’T‡4¢‰‡– 2( €øˆ!²Èb cøš¯‰#ŽRH%•4Ò8Ó‚Ž.I’Tíœ!D’$I’$IÒ±Û±22`à@8í4øôSÈÌ´ ¢2Ý˽´¥-™dE’$I’$Iu\qA$“Læ2—-la)KÉ ƒml#ƒ H(1ƒÈW|tlI’¤ja!D’$I’$IÒ±™;Î<f΄gž×^ƒÎƒN¥Z¬9Í™Ìd^äEÞæí ãH’$I’$©‰"*\™Ï|v²³TA¤+]éF7†3œéLg5«ƒŽ-I’T%,„H’$I’$IªœáG?‚K/…ÔTX¾†:•êˆË¹œ‹¹˜QŒ"¼ ãH’$I’$©ž:² ²•­,b餳žõŒf4éL7ºq7ð<ϳ†5AÇ–$I:.B$I’$I’$Uìå—á;ߥKáÍ7‹fiÛ6èTªc¦0…¬à·ü6è(’$I’$Ij šÑŒRJDæ3Ÿ«¹š•¬äçüœS95\™Íl6³9èØ’$I•b!D’$I’$IRùvì€n(šdà@øè£¢ÙA¤ãÐnŒe,˜à]%I’$I’ˆæ4'•TÆ3¾DAd(CYÆ2~ÊO9…SJD¶²5èØ’$IeŠ :€$I’$I’¤ZêÍ7ág?ƒ¼ýÞyƇHOªfÜÆmt¥+·pKÐQ$I’$I’¤*Ñ e(OðËXV¢ 2›Ù¤‘F[Ú’BJ¸ ²ŸýAÇ–$Iu”ïìJ’$I’$I Q^^Qùã ¡øðCèÛ7èTj`šÐ„ßó{æZ$I’$I’¤ú&–ØpAd+XËZžã9zÒ“™Ì DÒHc<ã ₎-I’ꈨ H’$I’$Iªa_~ C‡ÂgŸÁsÏÁUWH Øp%W2’‘œÇy´ EБ$I’$I’¤jOó1!Bd‘ÅÆð5_G)¤J*i¤q§]’$U’3„H’$I’$IõÅš50p deÁ›oZQ•N:‰$r37SHaÐq$I’$I’¤z!Š(I$“Læ2—-la)KÉ ƒml#ƒ H žøð "_ñUб%IÒQX‘$I’$I’êƒ%K ) òòàý÷aÀ€ IÇ-’Hã1³˜?òÇ ãH’$I’$IõÒá‘ùÌg';KDF3š®t¥ÝÎp¦3Õ¬:¶$I:Œ…I’$I’$©®{á4Î>²³¡k× 
I',‘DÒIçvng;ÛƒŽ#I’$I’$Õ{GD¶²•E,"tÖ³žÑŒ¦3éF7nàžçyÖ²6èØ’$5hB$I’$I’¤ºì׿†k®Ñ£á•W U« IUæ~î§B~Á/‚Ž"I’$I’$58ÍiN )% "ó™ÏÕ\ÍJVòs~N':… "³™Í¶[’¤ÅBˆ$I’$I’TÂí·Ã=÷ÀCÁ¤IùÍ龈ˆ øÕ¯~E×®]iÚ´)§Ÿ~:>úh©/5wî\Î=÷\Z´hA‹-8÷Üsyíµ×jr4R™bˆa"ù¿ãC><ê¶Gû¹ýøãéÔ©ùùù,\¸³Ï>›èèhºvíÊþð‡Ûïܹ“;=zмysN:é$ÒÒÒøÛßþV5“$I’$©†y®HRUhNsRIe<ãKD†2”e,ã§ü”v´+QÙÊÖ cW Ï?J’j+ !’$I’$IR]sàüÏÿÀ£ÂŸÿ ·ÝVæf#FŒ`ïÞ½„B!¶nÝÊ“O>É#<ÂÌ™3ÃÛ,Y²„k¯½–Ñ£Góå—_òÅ_0räH®¾újÞÿýš‘T®k¸†R¸8ÈÁr·ËÈÈ`Ê”)e>7mÚ4n¼ñF¢¢¢øì³ÏøñÌ­·ÞÊÆ™5k÷ß? ,øæ˜×\C~~>¡Pˆ;vðÅ_‘‘Á´iÓªzx’$I’$ÕÏIªj-hA*©<Àd‘U¢ ’CWr%íiOOz† "ÛØtìãâùGIRmQXXXtIªK"ˆ`&3ư £¨ˆˆˆ`æÌ™ æÏ“$núô餧§C’¤Úi×.øÑàÝwáå—!-­ÌÍ"""¸÷Þ{¹ï¾ûJ¬ýõ×¹ï¾ûX²d —_~9iiiŒ1¢Äv>ú(o¿ý6ùË_ªgÒ1ø„OèC~Ç︞ëËÜ&//ÓN;7Þxƒž={†×oÙ²…îÝ»óÙgŸÑ¾}{†ÎYgÅÿýßÿ…·™3gÓ§Oßá¯U«V¬]»–Ö­[WïÀ$©ÈÉÉ 111à$’tlüý%©Þ8ô^tÄìÙž+’Tãv²“÷xСåC>$‚zÑ‹d’I!…ó9Ÿ“8)è¨òü£$©,5yþ ¼ëM!D’$I’$Iª+rsaÀøç?aÑ¢rË Å®½öÚRëúöíËþóŸðç|ðC† )µÝ%—\>)íLÎäfn&“L6³¹Ìm7n̈#Jݥ逸žbÈ!´o߀wß}—K.¹¤Ä6 àã?þío›Ûo¿µk×VñH$I’$I ŽçŠ$Õ´V´ Ï ²”¥ä’Ë_ù+©¤’M6Wp's2I$1–±Ìe._óuбËäùGIRme!D’$I’$Iª >ÿúõƒ={`ñbèÕ«Â]ºvíZj]LL Û¶m ¾aÆðU‡ëС¹¹¹'YªJ÷qÍhÆ]ÜUî6ééé¼üòËlÞ\T)((àw¿û#GŽ o³zõjN?ýt"""ÂmÛ¶eýúõámf̘Á¦M›èÞ½;ÿõ_ÿÅu×]Çœ9spÂmI’$IR]æ¹"IA;…S‘?óg’I&DˆK¹´DA$Dˆ½ì :v˜ç%Iµ‘…I’$I’$©¶ûì34Úµƒ¬,(ãÍû²DFV|ú¯C‡lܸ±Ôú7{ŒA¥êÓŠV<ÄCü?°˜ÅenÓ®];þû¿ÿ›'žx€9sæËÙgŸÞ¦uëÖ¬[·ŽÂÂÂámN;í4þú׿²}ûvf̘Aß¾}¹ÿþû¹þúë«w’$I’$U#ÏIªmÚÓž¡ e*SYÊRÖ³ž?ñ'Id.sI#Ö´.QÙǾÀòzþQ’TY‘$I’$I’j³Ï>ƒó΃¸8xã¢RHêÓ§sçÎ-µþÕW_¥OŸ>Uz,éD]É• f0#Ien“‘‘Áã?N^^Ó¦McÔ¨Q%ž¿øÅ/˜7o^•O:Qó8ßá;<ÎãŒdd©çÏ<óLÎ8ã ÆǧŸ~ʰaÃJúÒÒàÛ߆×^ƒ–-ƒN$Õ*{ØCOzÒŸþ<Ïó¥žß»w/mÛ¶åóÏ?§cÇŽ$”$ËÉÉ 111à$’tlüý%©Þ(~/zÖ¬`sHR XÉJB„È"‹·y›5¬¡%-9—sI=´ô¦7‘'x/uÏ?J’ŠÕäùƒò®7u†I’$I’$©6ùà8ï<èÝþþwË RšÓœÉLæE^ämÞ.ñÜöíÛ™8q"Ç÷ÍXI’$I’$©I tÒyžçYÍjV°‚‡y˜bxˆ‡H"‰6´ Ï ’C9xLÇðü£$©¶±"I’$I’$ÕŸ~ \gŸ ¯¾ ÍšHªµ.çr.æbF1Š<ò€¢;#uìØ‘þóŸ<øàƒ'”$I’$I’¤â‚È,f±‰M¬`ñ1Äð ’DèÀ†„ "…–ûõ<ÿ(Iª¢‚ I’$I’$ X½.ººw‡¿ü¢£ƒN$ÕzS˜Â™œÉoù-ÿÇÿQXXþ›µ’$I’$I’¶â‚H:éä Ÿò)Ùd"ÄD&2–±´§=H2ɤBúA€ç%Iµ’…I’$I’$)h›6AZ´n ¯¿-ZHªºÑ±Œe¸‚+èD§ #I’$I’$Iª"‰¤ç¡%t (à#>"‹,²Éf<ãÙÎv:Ð •T’I¦'=ƒŽ.IR ‘A$I’$I’´;à‚   
Þ|bb‚N$Õ)cKqÜÆmAG‘$I’$I’TG5¢‰$’A³˜Åf6³”¥ÜÉÜÁœÉ™ÄÇ0†1é¬deÀ©%Ir†I’$I’$)8{öÀ!°q#deAllЉ¤:§)M™Æ4.àæ1‹¸(èH’$I’$I’ê¸â‚HqI$Ÿ|>æcB„È"‹Û¹ì$Ž8RH!•TÎç|ºÒ5èè’¤ÆBˆ$I’$I’„à¿ÿþýoX¸ºv :‘TgÏù\ÎåŒf4ÿâ_Dt$I’$I’$IõHQá‚H&™% "!BdÁ>ö•(ˆ\È…t¦sÐÑ%Iõ\dÐ$I’$I’¤é†`ñb˜7þë¿‚N#ÕyS™J.¹<ÌÃAG‘$I’$I’TÏD2Éd>óÙÉN–²” 2ØÆ6F3š.t¡ÝÎp¦3Õ¬:¶$©²"I’$I’$Õ´ßü^x^|ƒN#Õ §r*ws7÷s?_ðEÐq$I’$I’$5 GD¶²•E,"tÖ³žQŒ¢3éF7nàžçyÖ²6èØ’¤zÀBˆ$I’$I’T“^~î¹&O†!C‚N#Õ+·q]éÊ-ÜtI’$I’$I Xsš“BJ‰‚È|æs5W³’•üœŸÓ‰Ná‚Èlf³…-AÇ–$ÕAQA$I’$I’Œ?„áÃáºë`ôè ÓHõNšð{~Ï 1—¹ ÁÒ•$I’$I’¤àµ ©‡€Ýìf1‹ "‹,žáòÈ#„ðvßçû´¥mÀÉ%Iµ…I’$I’$©&¬_—\ýúÁït©ÞÀ®äJF2’ó8´:’$I’$I’$•pdAd»XÂB‡–§xŠ"øß"…RI%4ÚÐ&àä’¤Ú&2è’$I’$IR½·w/\v´l ³fA”÷i‘ªÓd&³ƒ<ÀAG‘$I’$I’¤ µ¤%©¤ò°”¥lg;çï a9äpWÐŽv$‘DÌf6;ØtlIR-à;Ï’$I’$IRu*,„«¯†•+aÉhãÝ»¤êK,ãO&™\ÅU|‹oI’$I’¤ê·kWÑÍHòó¿Y·reÑãôé߬‹Š‚aÊn^"Iª•ZÑªÄ "›ØÄ–M6!BLc‘DÒ‹^¤’J2É d ­iprIRM³"I’$I’$U§)S`Î… [· ÓH Æ(FñÏ1ŠQ¼É›áõ«YÍD&2’‘œÁ&”$I’$©Š½ÿ>üÉ<¢ê IDATìgШDF­+,,z9²èñàA((€ÓNƒÁƒƒÉ)I:f§p C-ÙÈB’E!BLbhÄYœ.’¤B4Ñ'—$U·È H’$I’$IõÖ{ïÁرðË_ÂÀA§‘”F4â1#Dˆ—x‰<ò˜Ä$Nçtã1f3;舒$I’$U­þý!&¦¨ð‘—Wô‘Ÿ_ôQüyAœt¤¤V’tÚÓž¡ e*SYÊRÖ±Ž?ñ'Id.sI#V´"‰$Æ2–!ö±/èØ’¤j`!D’$I’$Iª[·Â•WBj*ÜqGÐi¤©ý¸–k™ÀÎäLîâ.ö±"XÌâ ãI’$I’Tµ¢¢àŠ+ I“ò·iÜþçŠ%IõF,± e(OðËXV¢ 2‹Y¤‘F[Ú’BJ¸ ²ŸýAÇ–$U !’$I’$IRU+,„ë®+ºãâsÏA¤§á¤ ä’Ë.v±Œe¬d%PH!KXp:I’$I’ªÁO~”ÿ|^^Ñ6’¤z-ޏpAd%+YËZžã9zÒ“ÌDÒHc<ã âGùóC’TkùN´$I’$I’TÕ~ýk˜7fÌ€ví‚N#589Èt¦Óî¼Â+RH>ù%¶ÙÆ6¾äË`J’$I’T]ú÷‡¸¸òŸ…ääšË#Iªâ‰D¾äKV°‚Çyœx†gJD&2‘,²È#/èØ’¤J°"I’$I’$U¥… aüxxðAè×/è4Rƒ4ÜÈìfw¹oZFÁû¼_ÃÉ$I’$IªfpÕUФIéçš4áÃÍV’D g8Oð_ñ+XÁd&Gò(ýé_¢ ’C9tlIRüÛ½$I’$I’TU¶lŸü.½F:Ô`]È…@Qé£Þk¼ÆF°•­U8 Ij,„H’$I’$IUaΘ1ž|Ú´ :Ôàý˜óïÑžö4¦q™Û|Àä“_ÃÉ$I’$IªW_ û÷pãÆpíµÅ‘$ÕmGD>á~ůhF3&21\Æ0¦2õ˜ "Z¾Å·˜Ãœj‰$Õ?B$I’$I’¤µy3Üp\=\tQÐi$rgñ1s6gÓˆF¥žßÏ~–³<€d’$I’$U³Ÿþò» B^\yepy$IõF$‘ô¤g‰‚ÈR–rw0žñ$‘Dq cÓ™Î2–•ûõ (`! 
ØÊV.ã2~ÌÙÌæ$ÕuB$I’$I’¤5bDÑ]'M :‰¤#´£oñWqU©çшðRI’$I’Tͺuƒ³Î‚ˆˆ¢^½ G SI’ê¡F4"‘D2È`³ØÌf–²”[¹•½ìe c8“3‰'>\YÉÊðþò!»ØÀAð*¯r:§ó/2&IªK,„H’$I’$I'âφ—^‚§ž‚6m‚N#© Miʳ<ËgpÓ™^ÓÑ%©Nˆ :€$I’$I’Tg-[ÂĉA'‘TIpïó>q_ò%ÿá?ìaÍiÎÁƒÙ°a¹¹¹¬]»–ÜÜ\Ö­[þ|Æ ¬Y³†7Ò¦M:tè@ÇŽ‰ ?ÆÇLJãâ∎ŽzÈ’$IúÿìÝy|[Õ>üG^´xß$[‹m¢ÄYÀ"l KHqYšBÛ ´3ÍÐRRh ´C;@;¼ÀL‡It aiK)KÒ–²d m $%¡â„@â$lb[²Ûòny=ïp/ºZ¯¼ÉËóõÇY×w9çJº›ÎsÑ ÓÚÚŠ––ůÇãQ<÷ù|ðz½ø-h4¸ö¼ó`2™`4QTT$ÿ+ž¡°°0ÙU$"¢, ip|úó]|¹È•Ñ bC ¸Ïãy<ŠGa…uRÊÚÞÞ!ººº044„ÞÞ^ô÷÷£¿¿½½½BWWFFFÐÑÑqèëëC çÛÑÑ‘‘Oê,ÍCk\Éðð0:;;U×#tžñäää 55UÕ¸©©©ÈÉÉ ®×ëa0äçÙÙÙHKû¤ÙzJJ rss£Ž¼üŒŒ èt:èt:ddd -- ÙÙÙÐh4ÈËËäææ"%%E^Fè4D3!DDDDDDDDD£±kðûßúÓ'¡"𲆆†àñxäp‡Ëå—ڿ„'¯|®Ó\p|ÕοtÂãñ`xø³;ÑeeeÁf³Ád2Áf³aîܹ°Z­(..F{{»ñx<8pà¼^/<bÙùùùr8Dú•B#Á!~)EDDDDD4; Ä vø|>Å0©q©$''G{¢¸¸‹/†Éd‚K«Ü40¯×+ÏëÃ?”C#¡ JÓÒÒ£Ñ7H¢ýt9DDD‰ØýèEoÜñàU¼ŠX€ÿÆc36ËÁ¿ß¾¾>ôõõ¡½½=æß@@?ÒßÁáŽDäååA£ÑÈa†ÌÌLyÿ¨Õj‘™™)›••…ôôt@zz:ìv»ü¿ÐCp"Xð<â OÄ388ˆîînUã@OO†Kቔ‰´ŒÖÖVÅ<ü~¿üwww7å×h``===ªË@‘H¯CNN ƒü·^¯GVVVÔ¿³³³a0ä¿322““1C” „%j`¸é&à _Ö®Mviˆf-©ÑŒßïGss3\.WÄdž†Ec½^‹Å‚Sþt ¬_²âLã™(ßR.‡5¤ÇüüüQ•Ëï÷G-˾}ûb–)´ f³Y*±X,c^oDDDDDD4qúúúà÷ûçªÒóHÃ<OØ]¿ƒÏóóóQZZŠ¥K—*†IçŠV«:nÌå–Îe£•³¡¡o½õ\.ZZZ088Væàr—3Ò0³Ù F3ærÑô500€—ú_BzV:5ƒñ'04<„ÁÔAü+þ7}ÿ& ÜDH!ƒÁ 理ž(¤ý‘Á`@^^ôz=222äž&¤kááiúôôtdee…õtA“GêYEöövŸõ¦-DäpP @oo/:::ÐÓÓ#‡dûúúÐÓÓƒÎÎN¸á˜üü|äää 77W‰H¿ùùù‡çææ"//¹¹¹ì™Æ!DDDDDDDDD‰zà ±øóŸ“]¢©¿¿­­­1C.— ^¯WÑ£Gh¨Âáp„…+l6Û„A'}™XQQs¼XÁ‘ššìܹŠF6Ò—ÑÂ#Á½‘°a ÑØõõõ© vøý~8ΰ;Š%¤ó9‡Ã5,Q\\ŒÔÔÔI¯§Tµ¤àK¬uSWW'ŸûJ 5%:1C#ÁÃL&SÄ»£Qò !ÐÖÖ†ÖÖÖ¸---òßÝÝÝÀ_¬Ð a­šSRÞ•ŽŒŽ ätç ·/()Ä9gžûìrÃûŒŒ èõzäåå!##c\“4u¥¥¥ÉÇ.º¬®®.tuu¡«« }}}èèè{§éììDgg§<¬³³S> Âæ­ÑhPXXˆ‚‚‚˜Ò¯ô<++kBëLÓ ’‰ˆˆˆˆˆˆˆˆÑÖÜwpë­@yy²KC4­´µµÅ z„Þ54èQUU€(++Cvvvk—¸ñŽ9r¯¾ú*ššš00ðÙÝð¤†5ñ‚#%%%HII™èªM Ò â;¤a>ŸOÑ»#ÔÏÏχÝnh˜‰  ƒp‰'¸ç”hë½®®{÷î…ßï» ÞsJ¬ ÉdÜ‚ˆh&ëíí…ÇãÛí†ÏçSüív»áñxàõzáóùÐÖÖ!„bz½^Ö ½¼¼‡C1lß)ûÐÔÖ„’Ô”jKaO³Ã#ŠQŒ” S› â“_¢$ÉÎÎFvv6ŒFã˜æ300 DÚÛÛÑÞÞ®I?ž8qBñ<´G9­V‹ÂÂBF9ŸOy466Âår¡¡¡N§.— 'OžDoo¯<~VVJKKQ\\ ³ÙŒåË—GìaÀh4B¯×'±fDTPP€‚‚ÕãwttÀívÃëõ*B#Rèëã?FSSš››188(Og2™`µZa³Ù`³Ù`µZQZZ ›Í&‘‘1U¤1à·2DDDDDDDDDjÝ?PX|ãÉ. 
ј¨ z¨ 8¦¡ñŽ477£¦¦FU (Ò#ADDDDD3_pã|5=xx½^ +æ!<‚äÛíöˆ½=X­Väåå%©¶4÷B¯—Mhkk‹ù>•Îý~?|>†††óˆô$I$àBDª³³õõõ¨««C}}}ØßÁ=H½—••Án·ãüóÏGii©Üл´´9¼ñÑŒ•››‹ÜÜ\œvÚi1ÇBÀív£©©I"s:¨­­Å®]»ÐÔԤؾbΜ9°Ûí˜3gŽâïòòr^ûO~+GDDDDDDDD¤Fk+ðË_ÿñ»K¦)JMУ±±]]]ò4Z­………r#»ÝŽªª*E£©ËðÔÔÔ$ÖŽ&["Á‘àF3¡µµµp¹\aw^U±ÙlÐjµ]U"""""R!ôØ_MØ#˜N§CAA¢Á¼t©={_ ‰¦×ëa±X`±XTO©›ÐÏ€Ô ISS:;;Ö+4:¬¸¸˜×cˆf™¦¦&;vLz>ZZZ‹En„½|ùrØív”––Ê!ƒÁäšÑt Ñh`6›a6›cŽ×ÒÒ§Ó‰ÆÆF444ÈÛ§¿üå/¨««“o0–šš ›Í9õÔS±`ÁÑ&!DDDDDDDDDjüìgŸAþùŸ“]š…ü~̇ßïGCCº»»åi¤†6Ñ‚ÒcII RRR’X;šî‚ïÀêp8¢Ž'Ýy5^pÄãñ`ddDžN Ž7Š },++CvvödT—ˆˆˆˆhÆÎ5ãõŽàr¹ÐÒÒ‚ÁÁAÅôÁ ÛƒÏ=£5t7›ÍÐh4Iª-ÑøP{óIpO9Ñ‚$R$~¿?ìœø,¸ÚÛH¤ o¬@4}H×ÃŽ9"?¾ÿþûrLúìÛívœqÆX¿~½¼ý9í´Ó•••äÑlRTT„¢¢",[¶,âÿ¥Pl]]|Ý¿®®Ï?ÿ …ÜpC²KBÓ„š GCC†††äiB³;Å—ôÁÿ#šÉt:ªàÈÀÀZZZ c‚?c555عs'Nž<™ppÄjµò‹&""""Jª¾¾¾¨½ „s:rC*IpƒpéœÒáp„ “Î9M&ÒÒØŒˆh*>£jI°XÛŒºº:ùzU{{»búÐíE¬HÌf3·DQ!pâÄ 8p@ñë÷û‘ššŠ¹s碢¢ë֭í·ÞŠ… bÁ‚ì郈fŸ"$òøããßÿýß¡ÓépÖYgá /ÄÊ•+qî¹çκ€!DDDDDDDDD±<öpÆÀÙg'»$” @ €¶¶¶¸AІ9R£é‹h»ÝÖ°»¼¼YYYI¬M„ÑG‚ÂHwQmhh@ww·N48º=•žF¤§§wµi–kkkÃ_þò¼þúëØ³gŽ?Ž´´4,_¾W^y%V®\‰+Vð:1Ñ$1¸ôÒKq饗ÊÃä°ÞSO=…ÿøÇÐjµr@䢋.Š+–6³#3»vDDDDDDDDDcÑÓüáÀ=÷$»$$¸¡N¤ÆØÁÁBƒÒWƒc—––òËc"ŠKmp$Òö*48ÒÔÔ„ÎÎNy­V‹Â° Hh/$ååålDHDDDtsµ=xx½^ +æÚ \~~¾âfÁ“­V+òòò’T["¢©)¸’xçÎ@ø¶;Z¤¦¦~¿>ŸCCCŠyDÚvÇ ’$p¡Ù£®®üã±sçNìݻ˗/Ǿð¬\¹UUU €M!eeeØ´i6mÚhjjÂîÝ»±{÷n<óÌ3øÉO~‚üü|¬]»Ÿÿüç±nÝ:dgg'¹Ôã""""""""¢hž{€/9Ù%™b5œ–].ÚÛÛÓ© z”••Íø»ÿÑÔc0`·Ûa·Ûcާ&8¢fûé‘Û?"""šî¤»ÌÇ v z—y»ÝŽªªªˆ…y—y¢©å’K.Á®]»&mú±.o¢ç7SéõzX,X,ÕÓDêÝ)tßPWW‡ææf8Nô÷÷‡-3Vh$t{wš¹¼^/¶oߎ§Ÿ~ÿøÇ?PPP€Ë.» O=õÖ¬YƒÜÜÜd‘ˆˆT²Ùl¸öÚkqíµ×>úè#¼ôÒKx饗°qãF¤§§ãòË/dž p饗B«Õ&¹ÄãƒßEó›ßëÖFc²K2­© zÄ»C¾Ùl†ÃákèœÌ;äk4Ås½^ÂÂBœqÆX¿~=6lØ0¥/$k4!"ñ“m²Êm]Lô2©±žÇ"™õ˜Èeÿõ¯Ń>ˆ={ö`xxóæÍ÷¾õ-\wÝuaŸýñ0Á¿ßÚÚZU=$EzdIDDD4Yü~Ü`‡ô¼¥¥ƒƒƒŠéƒðJÇ3v»=jƒ^³Ù÷½½˜ Ûĉ0ÞõšÊû¤©þòxì3Ë—/Çm·Ý†Ë/¿o¼ñ.¹ä’)WÆ©¦¡¡<ò}ôQøý~|õ«_Å~ô#Øl¶„ç5™×¢µ7e „ˆ(A „Ðxb „ˆ(2BˆˆhJxäà–[¯ÈÌLvi&•ôEg¬†ÄŠ/8¥/7gJÐC­XþyäÜ{ï½x÷Ýw‘››;É%‹ y&wSi¹ãm¦B:„¥K—ÊÃ>úè#TUUÁívûò’¥¿¿­­­qƒ#¡ 7ƒmF{,--ENNNkGSTÑt5[·_Á=’Å zøý~8Nttt(¦nøª¦¬ÉdBZZZ’j›<œx<¤x¦rãÛéV†Ùrþ¬–”Œ”®¿¶··+¦W³ 6÷£cÑÐЀn¸/¿ü2V¯^»îº çž{n²‹¥Jqq1Þÿ}˜L&ÅðÚÚZTTT02‰™:x<ö™ôôt)×»ÑtÐßßßþö·øÉO~ǃÛo¿wÜqGB½iO…@%b»ØžìbÐ 
@lßÎ÷Q¨mÛ¶%»DDDB¬^-ÄúõÉ.Ÿjkk‡»víO<ñ„¸ï¾ûÄ–-[ÄÕW_-ªªª„Ýniii€ü«×ë…Ùl‡CTWW‹Í›7‹;ï¼SlÛ¶M¼øâ‹bÿþýÂétŠ‘‘‘dW/)â]bݼy³øá¨öþûk׊¬¬,‘••%Ö®]+Þÿý°ißÿ}qÙe—Éã­Y³&l¼öövqóÍ7‹9sæN'JJJÄu×]'Þzë­¸åýýçþgÅÿÄå—_.²²²„ÉdÿôOÿ$ZZZFUÎhÞ}÷]qÉ%—ˆŒŒ ‘-.½ôRñÊ+¯„•5RÙ£Õ)˜šõo]¨©Ÿ4݇~(®ºê*‘——µœ¡Ó©]Ï/¼ð‚8ï¼ó„N§åååâæ›o ×75óUPjßj—{øða±víZ‘™™)rrrÄ•WEñZm IDAT^)Nž<9êeæ5óûý"//OÍ*™qúûû…Óéû÷ï/¾ø¢Ø¶m›¸óÎ;ÅæÍ›Euuµ¼IMM Û‡ØívQUU%®¾új±eËqß}÷‰'žxBìÚµK>|X´··'»z4Áöïß/öïߟìb%l&l¿€p:âðáÃâ7ÞPìÇ·lÙ"6nÜ(ª««…Ãáf³9ì|0Ò9áÆÅ–-[ç…o¼ñ†8|ø°p:É®ò´ÀóHžGÏc¦ŸGJuR[æHõözª™g¬é#½~Ñ^µç®jÞ{‘–@èt:yœ±|F‚—7wî\yþUUUŠñâ-£¾¾^Q¾'Nˆ®®.Űúúúˆe˜ÍçωêííUì«wìØ!¶nݪØW¯ZµJ,Z´H˜Íæ°óîÐsïà}µtþ¼¯žÍçàÏ>û¬ÈÉÉ .»wïNvq–••%€ªqÕnCÆrM/Þþ8Þ¾2’Ñ^ŸLt_£ö¸j¼¯GÆ*{¬c 5eMt¾£Ù¯eŸËX¯KŸÑÛÛ+þô§?©jÈ£f=KãnÛ¶Môôôˆææf±iÓ&qÝuת¾ÑÊoþ±¦ –èz‹·Ü?üP”––ÊŸŸÏ'ž|òIqî¹çŽyÙ‰¼fßùÎwĺuëT­“Ùl´áÃxû$60¾fBƒj"š¦âö+¸Ñ¨´¯ n4*ío¥F£&¬qP~~¾X´h‘¢Ñèí·ß.¶nݪh4úÑG‰þþþdWyFây$Ï#ƒç1Î##•ùª«®ŠXæhËË<y?Ä_m]ã }Þßß/–/_.~øa!ÄØ>#¡óß¿¿8å”SÄ{ï½§Gí2Þ{ï=±|ùrÅ´###âÔSOï¾ûnÔ2ðüybIçÝRØ3øÜ[ {JçßZ­6b€Äl6‹E‹‰U«V)ž[·n;vìP„=‡‡‡“]å1{ì±ÇDJJЏ馛D___²‹3*_ùÊWÄÍ7ß÷š}¢û¡ÑL;–ýq,c½>©f¿ ö¸j¢®GF+{$jËšè|¥ÿ%º_ë>e¬e‰6í¹çž+ž|òIáóùäuTVVÖ˜_ÍrFóÞŽ¶^BÇ‹7€X¼x±xæ™gDKKK̺¨§4<Úñf"ï1âšk®µµµ¢½½]Üxãâì³Ïëׯ—‡}ãß_üâã–uªûðÃEee¥°Z­âøñ㪦™ ͧÿ$""•4Ð`;¶ã\d¢8¢váED4Ë=òÈ#ؼys²‹ADD³Ù‹/W]¸Ý€Ñ˜”"ô÷÷£µµÍÍÍp¹\Q½^/†‡‡åéôz=, ÌfsÔG›Í†ÜÜܤÔk¦‰×5xOOŠ‹‹ÑÝÝ ¸öÚk±|ùr|ûÛßVŒ÷ÀààÁƒxòÉ'åñ–.]ŠÛn»M1Þ/ùK:t?ûÙÏÙÙÙ8yò$ äqêëëa·ÛãvY«ìÏ?ÿ<®¸â Å|W¬X§Ó)S[ÎH6lØ€µk×bãÆò°ãÇcÁ‚ŠrE*g´²‡W»~¢Í/‘úi4¼þúë¸à‚ ¢Ö9RyÕ¬çHü~?N=õT´¶¶&\_5"Í?V=‚ç?–÷E¤ånܸgžyfØçæ×¿þ5¾úÕ¯ŽzÙj^³††ìÙ³>ø êëëñüsçÎ:>©ç÷ûcîßš››ÑØØˆÁÁAy½^üüü¸û9³Ù F“ÄÚQ°šš€ÃáHrIˆˆ3Û¯¾¾>yßç÷ûá÷ûÞKÚšš000 ˜^Ú7J¿Ò¾0Ú°ââb¤¦¦NX}HžGò<2x³á<2R™?Ž‹.º(n™¥é#½žjç™Èû!Þÿ$±êšÈ²à›ßü&ÚÛÛñôÓOÛg$xyµµµX·n¶oߎ³Î:K1N"˨¬¬Ä¯~õ+,[¶ ðòË/ã§?ý)^{í5Å´<žºúúúbc„s»ÝaïÙüüü˜ÇÁÏF#ÒÓÓ“TÛpµµµ8ãŒ3ðýïwß}w²‹3j½½½¸ãŽ;ð›ßügŸ}6ª««qÅWÀjµ*ÆKt?4šëjcÙ'"Ñë“jö j«&êzd´²G;¦PSÖDçI¼ýÚxíSÆR–H4 ~õ«_áúë¯W àðÞ{ïá׿þuBËIô½k½§f¸F£Á³Ï>‹/}éK ×%Þ²¢o&òÓh4ؽ{7V®\ p¹\°Z­ŠaMMMp8ðx<1Ë:ttt`õêÕBàïÿ{Üóèɼþ­½)!DD b 
„Æ!DD‘1BDDIwÓMÀ›oŸ^ÀO@mmmqƒ###òtj‚eeeÈÎÎ÷2Stñ¾TéîîFII‰Ü§¤¤o½õÊËËãÕ××ã¼óÎCsssÌñ<.¼ðBÔÖÖV¯^ÞÞ^üð‡?ĪU«––6.e×h4hkkC~~¾¡ËU³žcM?šú&R>5_!„Ž7–÷E"ós»Ý0›Í£^¶š×L£Ñ ¼¼ëׯǭ·ÞŠ’’’˜e§ñ§&8Ú@V§Ó¡   î~³¤¤)))I¬ÝìÀ@MW‰n¿¤s½xÁiXh¸?Æ zX­Väåå{½iâñ<’ç‘Áó˜ ç‘‘ÊÜß߃Á0ª2':ω„$2ßXó{æ™gðãÿï¼ó²²²ŒÏ9u]]V­Z…Ç{ ^xaØ8‰,ãg?ûŽ?އz°nÝ:|ó›ßĺuë–Ëóç™!ô˜&ÒqMðsŸÏ‡¡¡!Å<¢ÓD;Î þ,·ë¯¿GŽÁ[o½5#®tttàå—_ÆŸþô'¼üòËX½z5zè!ùP‰î‡Fs]m,ûãD%²MV³_ëqÕX¯G&RGµeMt¾‰Œ?Þû”±”%Úxn·ÅÅÅŠáõõõ¨ªª‚ËåJh9‰¼·ã­—Xõˆ¶®[ZZPXX8.u íx3‘÷˜F£Agg§üäÈÈRSSÆ¥¥¥©:¾›Ž?ŽE‹ᥗ^Âe—]s\Bˆˆ¦!Bh<1BD!DD”t W\Üw߸Îö¿ø¾ñoDý¿ÍfÃYg…eË–aÙ²e(..†ÍfƒÉd‚V«×²Ðøˆ÷ÅÄÛo¿¯ýë8tè -- ===ÐétŠñ²³³å;á§§§‡}‘*1 èííðÉ€wß}7^xáø|>œ~úéX¿~=¾ùÍoƽßh?„W[ÎHÒÒÒÐÛÛ÷½=–†Ÿ?øÁðÊ+¯Àív+ަ¾¡ÔÎ_m=Ô®7µËö¹˲#M«¦n4uy½^x<455¡¾¾o¿ý6:„÷ß?¬±­ä¢‹. »«-?BˆhºJdûõÃþ÷ÜsOÌq222°`ÁùwÑ¢E0(,,DQQŠŠŠØ{Ç,ÁóHžGÆ›G¢u‹4|*G&ÒRmm`9–@ÈXëmYÇŽÃÊ•+ñÚk¯añâÅòð±|F¤åÍŸ?íííxüñÇQ]]6N"Ëðù|X¸p!àv»Q]]#GŽ@£QöÊÈóçÙkxx---hiiAkk+|>jkkqìØ1ù7ÞûöŽ;îÀüÇLHùN?ýtTWWOØü“©§§›6mBVVžxâ cÛ©v,ûãhÆûúd´áj«&êzd"eW[ÖDç›è~m<÷)c)K"u ]G‰\‡VûÞŽ·^b•/‘ã…±¾Þ±>‡‰¼ÇÆãk:š?>®»î:ÜqÇ1Ç› ±Åº‰ˆˆˆˆˆˆˆˆf§8v xðÁqŸõúõëa0àr¹àv»át:áv»w8ojjÂsÏ=N‡ââbX­V9‘ž›L¦°/`iêØ¶m®¼òJùyQQš››qÊ)§(ÆknnFQQ‘b¼ÚÚÚ¸wÇËÍÍÅ<€x---xíµ×°uëV¼ñÆøýï?®u‰Dm9#),,DKKKÜ»mE¢ÑhÐß߯ø¢¢­­-l¼±®Ÿ±ÔoŸOnÙÒÒǃÎÎN8pðÉ>J ‚Áh4Âd2)†™L&Fù9o0;ð<2:žGª7ÝÎ#§²‰¨kOOÖ¯_ûï¿_Æç=t×]waÞ¼y¨®®Æ /¼€³Ï>{ÔË0¨ªªÂ³Ï>‹£GbË–-3òu¦èäcŸÏ¯×«8Ήtìzc†œœG=ö‰vwýñPZZŠ'NLØü“)33ÿó?ÿƒ%K–ÈÃÆ² Q;íXöÇÑLÖ~%‘ãªd_T[ÖD%º®ÇsŸ2Ö²DÒÖÖ†‚‚ŰææfÆ„—“È{;Þz‘æ¯æøSâñxÂz; ­Ëxš¨÷ØLÑÓÓ—Ë…²²²dEBˆˆˆˆˆˆˆˆˆ‚ýå/€^TUû¬F#®»îº¨ÿïëëCss³ ~›û?""Jв²²˜çz¿ß—Ë¿ß/ÿJû:¿ß††¼õÖ[p¹\hii » ¬^¯G~~>òóóåýô<Ò0³ÙÌF»Ó Ï#y9^¦ÛyädKä½<u½á†pöÙgãúë¯W”I1.|å+€ßþö·øÂ¾€Ý»w+Þû‰.ãºë®Ã=÷܃®®.¼ûî»—Éóç飯¯/âqH´a###ŠyèõzÅqGYYÎ9眈Ç&6›-©¡Ök¯½6lÀßþö7|îsŸKZ9ÆJ£Ñ ¾¾>lûœ––†¬¬,ùùX¶!j§U»?Nö¶6µÇU“y=2ÚzR[ÖDç›èºï}ÊXÊÉ«¯¾ÖSÂÿøG¬^½:áå$r¬o½ê?%þóŸËŽT—ñ4Ö÷ØL÷ýïz½k×®MvQÔDD”ˆíb{²‹A3±};ßODD¡¶mÛ–ì"Ñl¶aƒ«V%»1õööŠ>úH¼ñÆbÇŽbëÖ­âöÛo7nÕÕÕÂáp³Ù,(~óóóÅ¢E‹ÄªU«ÄÆÅí·ß.¶nÝ*vìØ!Þxã ñÑG‰dWoZ ¾ÄÚ××'êëëÅ3Ï<#.¾øbQQQ!>øàÅø|ð°Z­âÑGGx½^ñØc «Õ*Nœ8!W__/–,Y"~÷»ß‰––ÑÙÙ)^zé%aµZÅóÏ?¯Xþš5kÄáÇE 
n·[üà?W]uUܲÛl6ñæ›oŠñç?ÿYØl¶ˆõŠVßDÊÉ{ï½'æÎ+víÚ%º»»Å¡C‡Dee¥x衇b.S!6mÚ$¾øÅ/ŠÆÆFÑÕÕ%^yå±fÍš°qÕ®Ÿhë"‘úær»Úõ¼fÍñõ¯]Ô×׋þþ~qâÄ ±iÓ¦Q×7”Úù«­‡Úõ¦v¹}ô‘(--?þ¸ðz½¢µµUlß¾],[¶lLïÉxõ;ï¼óDUU•ªu@‘ÅÚ_­ZµJ,Z´Häææ*öUZ­V˜Ífáp8DuuuÔýÕÐÐP²«G*íß¿_ìß¿?ÙÅ "JØTØ~õöö §Ó)öïß/^|ñEñÄOˆ­[·Š;ï¼SlÞ¼YTWW‹ªª*±hÑ"‘ŸŸv¨Óé„Ùl‹-UUU¢ººZlÞ¼YÜyçbëÖ­â‰'ž/¾ø¢Ø¿¿p:bpp0©õ-xÉóÈxë,–éx©¶ÌÑŒuž‘†%ò^k]C‡ýâ¿K–,½½½ÇËg$Òòž}öYqê©§ ·Û-Ktýýý¢  @üÛ¿ý[Äeòü9¹‚Ͻƒ‚¯KÇ 999aÇ z½^q¼põÕW‹-[¶(ŽvíÚ%>,œNç´<ÿâ¿(²³³Åk¯½–좌±bÅ ñæ›oŠÎÎNÑÓÓ#öïß/.¾øbq÷ÝwËãe?¤vZµûãXÛÚPã}}2ÚpµÇUu=2’hëImYïXökã±O 6¯û²eËÄöíÛEKK‹¼ŽÊÊÊD}}}ÂËí±f¤õ"„úãOižçwžxê©§bÖ%ÖºHd¸‰½ÇF{Ü3 ‹[o½U¤¤¤ˆgŸ}VÕ4“yý Z{Óé¿æ‰ˆ&!4ž!"ŠŒ""Jš‘!Š‹…¸ï¾d—d\ôõõ)mÛ¶MÑpH Žh4š„‚#‡]]]É®^ÒEjle±XĺuëÄ/ùK"N÷Þ{ï‰K/½TdffŠÌÌLq饗Š÷Þ{/l¼>ø@¬_¿^äææŠÌÌL±|ùrñÜsÏ)ÆyõÕWÅ•W^) …V«sæÌ·Ür‹èììŒ[þ;vˆ9sæ­V+N=õTñ /„Õ+Z}-g4o¾ù¦¨ªªz½^”––Š{ï½WÕ2}>ŸØ°aƒ0"33S|þóŸ aãª]?ÑÖ…Úú…¾ÔHd={<±qãFa2™„V«‹/Û·ou}C©"õP³ÞYîáÇÅÚµkEff¦ÈÊÊ«W¯GŽõ²Õ¼fçœsŽ8ï¼óbÖ¶jkk‡ŽôÈÎÎŽØ U zHQ·mÛ¦hˆ:–PlS¡A5ÑhLÇí— ÝOßyçbË–-Š}µÙl©©©Q…‡3·lÙ"î»ï>9@"¶··'»ÊÓÏ#Õ—3žGN¯óÈDßjêœÈ<£›È{y,u4L§Ó…½þ¡ãŒö3¸B§Ó©˜ÿ“O>9ªe ŠSN9E¸\®ˆÿçùóø‘®çJûòàkº‘öåiiiª÷åÁçàÒ¾Üét&»Ê“b``@lذA¤¦¦Š[o½5,5üýï_ÿú×Å©§ž*´Z­0 âŒ3Î>ø QŒ›è~(Ñi…ˆ¿?"ö¾2ÔD\ŸŒ6\íqÕx_Œ&ÖzR[ÖDæ«v]OÔ>%Øx¼îGŽ«W¯YYY"33S¬]»VÔÖÖŽz9ñÞÛ‰¬µÇŸR}êëëEuuµÈÎÎŽZ—Xë"ÖqH´õ©æ=¦ö'ÑcÛ©èĉbÅŠB«ÕЧŸ~ZõtS!¢ùôŸDD¤’lÇv\ƒkâL‡F£ÁöíÛú®#"šíyälÞ¼9ÙÅ "¢Ù裀SOÞ|8÷Üd—fÒô÷÷£µµÍÍÍp¹\Q=FFFäéôz=, ÌfsÔDz²2dgg'±vDD¤–ßï¹hnnFcc#åiôz=òóóãîJJJ’’’ÄÚQ²ÔÔÔG’KBD”˜Ù²ýêëë“÷õ~¿~¿?ì¹4¬©© Šé¥céWÚ÷G{Îc"¢égÇŽعs'~ó›ß$»(ÓN___Ôýk¤ý­ÛíFhsÎüü|ž4ÖþÖh4"===Iµú~÷»ßá†n@FF¾÷½ïá†n€N§Kv±ˆˆ Ñh¶ÿ4y|>þû¿ÿ?ûÙÏ0oÞ<<ñÄ8ãŒ3TO?™×¢µ7M›ð%Mû÷iiÀé§'»$“J§ÓÁb±Àb±Ä¼X900€–––¨ …kjj°sçNœÅ± TU$!"¢ñ±bÅ Üxã Ý%{º9Æ vHü^¯â&8€ò[ÚÙíöˆû'Þgj0›ÍøùÏŽ[o½>ø þë¿þ wß}7®¸â lذk×®e•ˆ&•t³ö29Ün7vìØ§žz o¿ý6–,Y‚­[·âºë®ƒ^¯OvñF"""""""""ÉþýÀ™g&»3‚ôEg¼Bj‚#aw­/Ô#§<Eó‹{J.²Ë²Q’Q‚ªþ*Þµžˆ¦½þþ~´¶¶Æ y¸\®°†(j‚l|BDDD“M:FI$¤Ñ××·Qn]]ü~?šššÐÙÙ¶L5¡iXqq1RSSÇ»êDDÓÞtk”*<âíC¤ç~¿_1½N§CAAba·ÛQUUqb2™–Ææ—ÓUyy9~úÓŸâÇ?þ1ž}öY<ù䓸¾€ÜÜ\\y啸üòËqÉ%— 333ÙE%¢nºío§£††¼ôÒKxþùçñúë¯#33W]uþó?ÿ+W®LvñÆHˆˆˆˆˆˆˆˆˆ@ààAàê«“]’Y!€üðßÿÉo_E@=ò‘tÈCJPß°m¢ 
)¤@^xåùeìÎÀ³kžÅÀÀ€<,ô.²ÑKJJ’’2é뀈féîØñ‚###òt¡Aªªª°íXYY²³³“X;"""¢ñc0`·Ûa·ÛU|‡÷h€kjjäç¡Ç[@ä;¼G ’X­Vètº‰¨:‘n$/Øár¹ÐÒÒ¢¸¡   JÛt»Ýu;Ï›ËÌNøÚ×¾†¯}íkhjjÂöíÛñÜsÏaýúõÐét¸è¢‹°jÕ*|îsŸÃ²eË"%"šº»»±oß>ìÙ³¯¼ò <ˆœœ¬Y³Ï<ó ª««a0’]ÌqÅ@œ8´·³‡õ¡ï“`G‚?Íh›— þ)@æb.òSCÿóÙO! ¡»@ô+ï$úXWW‡½{÷¢±±]]]òr¥»ÿÅ Žð®±D‰Ú ‡ÛíVÜíMjl"mc***¶=6› Z­6‰µ#"""šú  ,KÜ^*%j×ÖÖ&ÔÈ8V¯$ldLD³šð^ð0µá=é\šá=6› ßýîwñÝï~^¯;wîÄÿýßÿáž{îÁ-·Ü‚¼¼<œþùX¹r%V®\‰3Î8ƒ×‹‰ˆ¦€ÎÎNìÝ»{öìÁž={PSSƒ¡¡!,X°]tî½÷^\pÁ3úØ€"""""""""Ø¿HO–,IvI’¢ ]ð}úÓ‚´¢-Ÿþxᕃmh“ÿîBWØ|Ò‘Ž„…6æ`*Q©z„ŽcÀØïÆ£öN²j‚#N§ò4Z­………q{)++CZ/½Mw±¶Á Uš›•75AÒÒR¤§§'©fDDDD$5VKjÈ«s]]üÜï÷+¦—nD+4<Ìd2ñ¼’ˆ¦´àsæXÁ¿ß¦¦&tvv*¦ÖIÛ?‡Ãu;ɵÐd3™LrÏ!B>|»wïÆž={pÿý÷ã{ßûrrrpþùçã¼óÎCee%ŒFc²‹ND4£ŒŒàĉ8pàöïß¿ýío8x𠆇‡±hÑ"¬\¹·Ür V®\‰’’’dwÒð쑈ˆˆˆˆˆˆˆÞ}¨¨ôúd—dÌúÑ/‡9¤@‡ð½ðÊÿoE+úѯ˜‡¡…(„ &  ° b˜#øy²’TëÄŒGp¤¹¹555û„6Ñ!(P IDATôÈáDÉ¡&èÂ?ׇƒ0"""¢Y"¸‡Ãwüà;áGk -ÝŒÀï÷ÃëõbxxX1HwÂ$±ÙlÈÍͨêÑ 'õ|o»% óù|RÌC xo£ìv{ÌÑt¡Ñh°dÉ,Y²7Ýt„¨­­•"Û¶mCCC ´´•••r@¤²²f³9É5 "šž†‡‡qüøqÔÔÔàÀ8pà<ˆ®®.¤§§£¢¢UUU¸í¶Û°råJ˜L¦d9iø­?,XìRDÔ‡>øáG3šá‚Kî¡#Ú07ÜŠy衇˜aF>òa…‹±X1,øÇ~) $ þb<ô±¶¶.— n·B|öÚ08B4~Ô=BïJ*õü#}æìv;ªªªU, ÊËËy'R""""R-8@¢V¤»í‡>¯««ÃÎ;ÑÔÔ„Åô‘î¶Ü;ôyII RRRÆ»êD4‡ÒâmWü~Øõ*à³kVÁÛ©ÌÐíJQQ´Zm’jK4ù4 ***PQQo¼ÐÒÒ‚È–Ÿxâ Üyç ßXdéÒ¥X´h,X€  333™Õ "šRÜn7Ž=ŠcÇŽáÈ‘#8xð :„žžhµZ,Y²‡6l@ee%–.] N—ìbO „À‰ÀÕWOøb0|ðÀ7Ü1{íþ wä &˜PôéO! 
qNƒñÓiXðÿib©½[¬tÇÅxÁǃ‘‘yºà/ᣅGÊÊÊ=Õ%šT~¿?âç%¸!KCCº»»åit: ¢=¤G6‚#"""¢©Bí $j|×ÔÔ$Üà;ZÄh4òfDI¢&0& S“zï``Œh|aõêÕX½zµ<Ìï÷Ëw´?pà^xáüô§?ÅÀÀ4 ÊÊʰ`Á9$²páB,\¸EEEI¬ ÑÄÁÇŒ£GÊáÚÚZ;v ~¿——‡… âôÓOÇW¿úU8,^¼˜ç"q0BDDDDDDDD42Ô×óæjòhC[X‘þöÀƒ|ÖÐ_}Xïs0U¨ ë¹Ã l°A ÞqoºÒëõ°X,qƒ#ýýýhmmÙ·o\.¼^/†‡‡Ãæ«×‘ÒÒRäääLFu‰bŠô~lllÄàà <Ôˆ%^ÐÃl6C£Ñ$±vDDDDD+øÆqÇ—nPÚx<´¹"ñù|RÌ#ôx.¾øb\|ñÅò°¡¡!ÔÕÕÉ  =н{÷â±ÇCWW€OÂ% .Äܹså°èœ9s`·ÛQRR’¬ê©288ˆ“'O¢¾¾uuu¨««C}}=Nœ8cÇŽ!¬V+,X€ÊÊJlܸ§v-ZÄíÜ(1BDDDDDDDDÔЊ@H?úá†[q4¡IѳGðß=èQÌ.90à #Œ0Á„r”ãLœ LŠáf˜‘…¬É®-M:NUpd``---{Lòìܹ'OžL88bµZ‘——7Õ¥FMУ¡¡AѸ,ô=ép8 Y‚ÿGDDDDD‰ ¾AZ‘z%m¬^WW¿ß¦¦&tvv†-SMhDV\\ŒÔÔÔñ®:Ñ„’ñ>+Òséîש‡ËàÏ…tóƒHŸ“É„´46ù#š®ÒÒÒ0þ|ÌŸ??ìrH䨱cøè£°oß>œ‡ˆ$±ÙlÐjÙ‹*/éúI¼`‡ËåBKK‹¢WK@„ îÝ2Úû™=\‘¤´´¥¥¥¸ä’KÂþ'…0ƒ9‚—^z ü±¼?•ö£v»]¾þü7¯åQ<Áß%ÕÕÕ…ýOOOGii)ìv;¬V+–/_®©©= ±c „ˆˆˆˆˆˆˆˆf„ô hF3šÐ'œpÃæOÜpà 'zÑ+O“ŠT£æÌ4˜Mé8%}ÎÃJ£VXa‚ 6ØPŒbhÁ4½I ***bŽ/8ÒÜÜŒÆÆFEƒ©±C¼^GØÈaêéïïGkkkÌÞ<\.WXC-5AÒÒRäää$±vDDDDD4™  ,KÜsOIhãûHA’ÚÚÚ„ßÇꕄ祳‹šRð0µ!%»Ý1¸dµZ¡Óé’T["šÉòóóáp8"Þ¨··W¾;cc#\.át:ñÎ;ï ¡¡½½Ÿ}/’••…ÒÒRX­VX­V”––¢¸¸%%%(..†Ñh„ÙlæÝû‰fááaø|>x½^¸Ýnx<ø|>y›át:ÑØØˆææfÅñ¶Éd’ƒØóæÍÃÊ•+åðšÝn‡ÍfcÏeS_""""""""šÒħ½x¸à„MhB3šÑ€Eø£ò4:è`†XPŒb,Ã2\‚Käp‡” &˜‚àÿ»øÇ?€5&±¦DSÃhƒ#Á )êêê°wï^444 »»[žF§Ó¡   np¤¤¤)))]Õ- ­­-nÐÃívC!O'5 ¾‹ièëS^^ެ¬¬$ÖŽˆˆˆˆˆf éT-©¬Æýuuuò9k{{»bzé¼4Vh$x˜Édb#·)¤¯¯OU°Ãï÷Ãét¢££C1}p€HzGÔ÷Cqq1RSS“T[""u222°xñb,^¼8ê8íííp:hhh#MMMp¹\¨©©×ë…×ëUL£Óé`2™äý¡Ñh„ÅbÑh ^XXÈë¹D“, µµn·>ŸO{x½^ÅpŸÏ§½jµZ˜L&X­VùxèŠ+®€ÅbAYY™tÕëõI¬!%‚g,DDDDDDDD”4C‚ .4 AîÝ£aá ÈÓä#XPŠRXaÅ98X`…6Ø`&˜+ÈÉ“À)§Œoåˆf8µÁ‘àÆ¡Rp$¸‹qà“/# ¡½”——Ϻ†‘ÖgpãµAŠŠŠ°0Nii)ÒÓÓ“X;"""""¢Ø‚{!‰t—ôPÁ=DD Hç¦~¿^¯ÃÃÊyDê!"ZÄf³ñŽê*I72ˆ÷úHÃ|>†††óí±Tê½#VЇˆh6ÊËËC^^žê›…n“¥¿?øàƒˆ=Há¡»XûKé—Á;¢OÄ =GûmnnVÌ#RÏy‹-Šøý oÊ5ó0BDDDDDDDD¦}hF3ê>ýqÁ¥xÞ€ á³/ró‘;ì0ÃŒ T`VÁ‹ÜÛÇ©8¹˜€/Õ]. ªjüçKD0 °Ûí°Ûí1ÇS‰tw×РC¤Ç²²²)G×Xõ—ÕÔ?RÐc:ÔŸˆˆˆˆˆh"HÔ n -¨PWW‡;w¢©© Šé#õBÜ6ôùLi¾ ]_‘7†ÞÈøì7xýI繡ëÔf³A«Õ&©¶DD3“Úž»„ðù|ðz½ðù|hkkCkk«üÛÖÖ†¶¶6Ô××£¦¦Fþhˆ$55ÈÍÍEnn.òòò››‹œœäääÈÿ/xXNN{2¦¤B ½½èèè@gggØ£ßï8¼££mmmŠ›eI233QPP€ÂÂBùÑf³aÙ²e(((PüO걇Ÿ…ÙßѨ bPîÝã$Nʽ|?ïB—<~! 
QúéÏB,Äj¬F)JQ†2”£%(A*’t'¨æf ¤$9Ë&"ãñûý¨­­•‡S™ˆ2Ô=âõb6›áp8ÂÊ;{H!"""""šhjÃJÔ!jjjBD ’ÆIéÝ1ø|6^r0f5€¿|2}¤`ŒÔ{ÇLÆÍ&“ &Sb½µwuuÉ‘––98"5¦oooGgg'¼^/>üðCEÃùÐåHRRR››‹üü| yÿ£×ëa0——½^ŒŒ äææB¯×#33999ÐëõÈÊÊBvv6ôz=²³³‘••…ôôtù‘f†žž  §§@èííE @{{;úúúà÷ûÐ×ׇöövôöö¢³³@ÝÝÝèêê’‡E s@zzºZÊÏÏ—ÿ.((Àœ9säPSh¸Cú[§ÓMò¢éŽ""""""""Šh#pÁ…ú(?.¸0Œa€Z9ìQ†28àP´¶¶ÊwVoii‘¥;­üñÇØ¿¿<<4@RPP£Ñˆ¢¢"ù·¸¸XñÜh4Âd2¡¨¨ó^¦ÇãQ<—îôL£Ñ(æ]TT‹Å‚ÓO?]~î9Õƒïý=\ß}=þ_êÿC–w©&""¥ììldggã”SNÕôÑz`èèè€ßïØ¨ÿã?F___XÿŽŽŒŒŒÄ]¦2ÑétÈÈÈ@ZZ²³³¡Ñh——ÈË˃F£Avv6ÒÒÒ‘‘N'‡R(Æ Ï/xÝH½Ï£å(E©Ü»‡š$×f”ü~ ¿Ÿ¢&33óçÏÇüùócŽÚƒ‡ô+õÜñöÛo£¹¹­­­ò4v=Ÿ>ÏÈÈC‹Ë—/ÇW\!÷î!ýLX}‰ˆˆˆˆˆhz0 °Ùl°ÙlªÆQ6"…8’à–ÅŸ>z‚æ©Óéä ‡Éd‚ÑhDyyyÔpIQQ‘ªÞ;ÊQŽMY›à„;°¹P×𔈈HÜÜ\äææ¢´´t\æ'õÑÙÙ‰¾¾>ùo)1<<,‡¤°‰4pBÈ7jjjŠ\â"f»X˜hA›¬¬ÿŸ½;«ªNü?þºx‘M Q¤Ì KMÄ]p_ÑÜÍ\-5mlLm™, ušòWãÞŒ¹ç®)¦™+i¹€+"傆‚ŠâEP~÷+á.–÷ó>îCsÎûÏ8ÝåÍÇÉ:ÃXæøÌYÞ\\\(V¬ŽŽŽØÛÛ[‹=wÎ"sçÏ" !""""""""…X:éÄÇ NC ±Äf)}$óû›ÏÅ(ÆÓ$>þ÷?Ë–56‡ˆ"§3ޤ¦¦Zgiа!‡ÇÇÔ³'^^^¸¸¸äQZ)jlllðððÀÃÃ#Çë$''[g ©ðÖ[üòÑGÖò‡³³óÉÚ•®<Ã3„B#±†5T ÂÙ—ˆˆÈ£²··ÇÞÞ>_î7CƽÊ#™%•œ¸×6–,YÂáÇ?~|–åw4$³|ñgNNNØÙÙž!ED !""""""""Ü-nñ ¿XKÇ9ný9Ž8ÒH 4¥©D%¼ñ¦=í³>ÊS;ì >ƒdB4CˆˆÜGñâÅ©P¡*üþE–jÕªAÕª§ÉÎÙÙggçßùÁïyyæÉ¾`/{éHG d%+ Oö-""RPÝY–rU}‡âÈ‘#tïÞ=Oö'"O† !""""""""DIDÍa{ÇíGH!7Üðùãö"/âƒÕ¨†?þ¸âú€=Q¿ýöö~”ˆˆˆˆˆˆˆˆHAç…ÛØFúД¦Ìb}èct,ùnܸat yD*„ˆˆˆˆˆˆˆˆä#×¹ÎQŽr„#æ0Gÿ¸ÅK*¿Oå솾øâ‡èÀ(FᇾøRŠRA””®®`2DDDDDDDDD¤Pp‰oø†wy—¾ôå8Ç #ÌèX"""r³Ù¬BˆH! Bˆˆˆˆˆˆˆˆˆ’HÊRúˆ&š£å~!ƒ ì±Ç?ªQ®tÅ;îFÇ/\®^£Sˆˆˆˆˆˆˆˆˆ*Å(Æd&ãƒÃÎ/üÂ|=öFG~Ÿ!Äb±CD‘ !""""""""OPIDÍaK¬õçSœ²?|ñ¥:ÕéOªS|¨Nu̘Ž_4$'ƒ³³Ñ)DDDDDDDDD ¥! 
ÁozЃœà¾¡4¥Ž%""Rä988h†‘B@…‘Ç •T¢ˆâ8ÈAüqK" €R”¢ê· ‚¨F5ªP… TÀ„ÉàôE\r²fy‚ZÑŠíl§#©O}Ö°†*T1:–ˆˆH‘f6›±X,ddd`2éóJ‘‚J…‘\:Ïyrýì·?ŽqŒtÒ)A jPƒçxŽnt£Ú·2”1:¶ÜËÕ«*„ˆˆˆˆˆˆˆˆˆœæ4KYŠ3ÎFÇ)2 !‹Åà$"ò(T‘B'Ž8¶±­le7»9Æ1ns›gx†ºÔåU^%€êPG.5¶¶Ð«Ìš7oÞ}ŒÉýúåm.‘"*„¶°…Žt¤XÇ:¼ð2:–ˆˆH¡g6ÿþKò4CˆHÁ¦Bˆˆˆˆˆˆˆˆx1İm|ÿÇí~ÁŒ™zÔãE^$€êRO<Ž*ùAïÞ0cƽŸ7™àÅó.ˆˆˆˆˆˆˆˆH@»ÙM[ÚZK!U©jt,‘B-s†BD 6BDDDDDDD¤À‰%–ílg;ØÈFâˆÃjS›^ô¢-hD#̘Ž*ùQ£Fàé ññÙŸ³µ…¶m¡T©¼Ï%""""""""R„U¤";ÙIg:Ó†|Ã74¥©Ñ±DDD ­ÌB,‹ÁIDäQ¨""""""""ùÞ N°‰MÖY@ÎsGiHCB ¥ͨK]ì±7:ª&¼ôüë_pófÖçnÝ‚¾}É%""""""""RÄ•¢›ØÄКÖÌf6}èct,‘BI3„ˆ*„ˆˆˆˆˆˆˆH¾“B ;ÙI¬bG8‚#ŽÔ§>¯ò*hDcSœâFG•‚ªwoøøãìËÍfèÐ!ï󈈈ˆˆˆˆˆˆÅ)Î0 ô¥/Ç9NaFÇ)tÌf3&“I…‘N…ÉŽqŒ•¬d=ëÙÁnq‹ÚÔæE^¤5­©O}lõV†<.µkƒ¯/ÄÄüß2;;èÖ J”0.—ˆˆˆˆˆˆˆˆˆ`ÂDa”¢oò&g8Ãÿãÿé=b‘ÇÈd2Q¼xq,‹ÑQDäè¿EDDDDDDÄ·¹Íö°òÛQŽâŽ;miË\æÒ’–”¦´Ñ1¥0ëÛ&M‚´´ß§¥AŸ>Æf«‘Œ¤<åy‰—8Ö°gœŽ%""Rh˜ÍfÍ"RÀÙ@DDDDDDDŠŽÛÜf+[ÊPÊQŽ4`ËhO{¾ç{Îsžù̧}T‘'綾þ¯ P²$´ha\ɦ3ÙÌf~â'ZЂ‹\4:’ˆˆH¡áàà BˆH§BˆˆˆˆˆˆˆˆõÙÏ~£#‰ˆˆxf³Y…‘N…ylRHá+¾"˜`|ñe&3éE/p€h¢ÏxžåY£cŠüŸž=!#<=IªZ•¤¤$ë===Ýèt"""""""""r‡²”eÛ¨A ‚â~0:’ˆˆHæààÀ7ŒŽ!"ÀÖè"""""""Rðã3˜Á\æ’B èÀJVÒ–¶ØêíÉÛ·osåÊ’’’¸råJ–û7¸|ù2ééé$''sóæM®_¿Î7°X,\¿~›7orõêUnݺÅåË—ÉÈÈ ))éžû¹Ójà‡øxþùÔS9ÊêêêŠMöß³âææ†Éd¢dÉ’ØÚÚâì쌽½=ŽŽŽ˜ÍfpttÄÞÞggglmm)Y²$%J”ÀÕÕ5˽dÉ’”,Y“ÉôPçSDDDDDDDD¤°r‰լ¦ýhIKþËéF7£c‰ˆˆH*„ˆ|úF†ˆˆˆˆˆˆˆ<”[Üb5«ùœÏ‰ o¼y—wéO<ð0:ž(11‘ß~û„„8þ< \¼x‘‹/f+|\¹r…äää»n+³Dáââ‚NNN/^œ%Jààà€ÙlÆÃÃ#KÉ"³°áää„]¶mþ¹ÐqxØäæ–e\N %iii\»v[·nqõêUëãÔÔTRRRHJJ">>ž””RSS¹víiii\½zÕZn¹\\\¬%WWW\\\pwwÇÝÝÒ¥KS¶lYJ—.MéÒ¥ñðð T©R÷ùÛ)øŠSœ,àu^§½øœÏ£c‰ˆˆ8f³‹Åbt y*„ˆˆˆˆˆˆˆH®\â_üq;ÃZÑŠU¬¢í°!û¬ R8¤§§ϯ¿þÊéÓ§9{ö,gΜÉRú¸xñ" ¤¥¥eY7³¸PºtiÜÝÝ)[¶,Ï>ûl¶Y1\]]qssËòØÞÞÞ #Î[7oÞÌR¹ÛL)—/_¶þ|ðàA빿xñb–mÙÙÙe9׸»»óôÓO[ïåË—ÇÓÓ[[½=(""""""""S1Š1ƒxãÍ«¼J,±Lf²Ñ±DDD Í"Rðé_É‘œ`*S™Ç<ŠSœ d(CñÅ×èhò\¸pØØXΜ9Ù3gøõ×_³ü|þüynß¾ @±bÅ([¶,åË—· êÖ­k-~xxxP¦Lk D¥ƒ³···ž¯ÜJOO·–C.\¸p×ÙYbbb8}ú4çÏŸçÖ­[@Ö¿G///ž~úi*T¨`ýÙÇLJ2eÊ<îCy¬Æ2<®qñ/ýò"‘R!D¤àÓ§ñ"""""""r_ûØÇt¦³€”§<çï¼Æk¸âjt4É¥¤¤$bcc­÷èèh>̉'¸zõªuœ››>>>xzzR³fM:w§'åÊ•ÃÇLJòåË«ä‘ØÚÚâé鉧§gŽÆg^çÎ#>>ÞúsTT«V­â×_%==€âÅ‹ãååEµjÕ¨^½:>>>Ö»··766ú`]DDDDDDDDŒ7¸áFozsŽs,`fÌFÇÉ÷Ìf3—.]2:†ˆ<}r/"""""""ÙdÁ Vð!ò#?Ò€,béL1ŠOîãæÍ›9r„¨¨(¢¢¢8~ü8111ÄÄÄ’’¸ßú 
IDAT@‰%ðõõÅ××—æÍ›óÚk¯áëë‹åÊ•SÙ£sss£N:Ô©Sç®Ï§§§sîÜ9NžÝ«ÛÜf)KyŸ÷‰&šÎtæ_ü‹@Ž&rûömN:Å¡C‡ˆŠŠ²þyüøqÒÓÓ±³³£J•*<ûì³´k×ÎúÅ}???Ê•+gt|ÉÇlmm)_¾<åË—'(((ÛógÏžÍR‰‰‰aÍš5|òÉ'¤¥¥aggGåÊ•©Q£†µ$R£F *V¬¨EDDDDDDDDä‰jF3¶³6´!˜`Ö±<ŒŽ%""’o©"Rð©""""""""Üæ6ËXÆxÆsŒc´£s™KmjMø½üqäÈ"##‰ŒŒdß¾}>|˜ëׯc2™ðööÆßߟjÖ¬IõêÕ©R¥ vvvFG—BÈËË ///š6mšeyZZGÍRPš5kqqqdddàèèHõêÕ©S§P¥J•DDDDDDDDD䱪A ¶³Ö´&@¾å[üð3:–ˆˆH¾d6›±X,FÇ‘G BˆˆˆˆˆˆˆH–Yùã'èJW–±ŒªT5:Z‘vöìYkùcÏž=ìÝ»—ääd¨U«õë×gÈ!øûûS½zuœœœŒŽ,‚þþþøûûÓ»woëòääd¢££9tè‡bï޽̙3‹Å‚‹‹ /¼ð‚µ €———G!""""""""…7ÞüÀ´£MhÂzÖó<ÏKDD$ßÑ !"Ÿ !"""""""EPi|Í×¼ÏûÄG/z±’•<˳FG+r2228tè[¶láûï¿'22’³gÏbccCÕªU  Gàïï¯Y?¤Àqvv&00ÀÀ@ë²´´4­ZµŠþóŸÜ¾}///êÕ«G“&M¦F˜L&@DDDDDDDD "<ØÂ^äEšÑŒµ¬¥! Ž%""’¯˜ÍfBD 8BDDDDDDDŠ›Üd! ™ÈD~åWzÑ‹µ¬Å?££)GeË–-lÙ²…­[·’@©R¥hÒ¤ #FŒ €^xggg££Š<vvvÔ©S‡:uê0tèP®^½Ê¾}ûسg»wïf„ üå/¡téÒ4kÖŒ   ‚ƒƒyöY×DDDDDDDD$gœqf-kéCZÑŠå,§5­Ž%""’o888`±XŒŽ!"@…‘" 4f2“÷yŸDÄ Æ2–gxÆèhEÂùóçY»v-[¶lá»ï¾#>>gggš4iÂØ±c æ¹çžÃÆÆÆè¨"†qqq!((ˆ   nß¾Íþýû­å©±cÇ’œœL¹rå&((ˆöíÛS¦Lƒ“‹ˆˆˆˆˆˆˆH~f=‹XÄ`Ó‰N,`]éjt,‘|ÁÁÁA3„ˆp*„ˆˆˆˆˆˆˆbd°Œe¼Ë»ÄÇk¼ÆXÆRŽrFG+ôŽ;ÆÊ•+Y±b{öìÁl6Ó°aCFŒApp0uêÔÁÖVo͈܋ µkצvíÚüõ¯%==½{÷Z‹U¯¿þ:ƒ&00Î;BåÊ•Ž-""""""""ùP1Š1‹Y¸àBOzòþÃ@KDDÄpf³Y…‘Nß:)¤v±‹±Œe;ÛéF7Ö±ŽJT2:V¡Í’%KX³f ûöíã©§ž¢]»vŒ5жmÛâäädtD‘ËÖÖ–ÀÀ@yûí·¹qã¬Y³†ððpÆŒƒ:t {÷î4hÐ@³îˆˆˆˆˆˆˆˆˆ• Ó˜FIJ2ˆA\å*oð†Ñ±DDD åàà@ZZ·nÝ¢X±bFÇ‘‡ ODEDDDDDD ™#¡=h@ŠSœ½ìe1‹UyBŽ;Ƹqãðòò¢Fü÷¿ÿ¥Y³flÛ¶ .0þ|ºwï®2ˆÈcæàà@ÇŽùâ‹/8sæ [·n%$$„5kÖиqcÊ—/ÏÛo¿ÍñãÇŽ*""""""""ùHa|ȇü…¿0 FÇ1”ƒƒ‹Åà$"ò°4CˆˆˆˆˆˆˆH!q–³Ld"³˜Eª°šÕt ƒÑ± ¥ääd–,YÂìٳٱcåË—'44”nݺáïïot<‘"§X±b4mÚ”¦M›ÎÁƒY²d óæÍcÊ”)4lØW^yEå,`,cqÁ…×yÜ`2“Ž$""b³Ù À7ptt48ˆ< Í"""""""RÀ]ãa„á‡ØÀç|Ψ òìØ±ƒW^y…råÊ1|øpžyæ¾ýö[N:Å„ ò}$##ƒùóçÈSO=…ƒƒÏ=÷ï¼óQQQYÆšL¦'ž'/öQИL&Ã΋‘û~ÜjÖ¬É?þñâââX·nåÊ•cèС”+WŽAƒ±k×.£#ŠˆˆˆˆˆˆˆˆÁ†2”ùÌç>aøÍm£#‰ˆˆä¹ÌBnܸapyX*„ˆˆˆˆˆˆˆP©¤ò ŸP |Æg|ÀœàCB1Š¯Ð¸}û6«W¯¦Aƒ4jÔˆ={öðÞ{ïqúôi¾þúkZµj…MÁx‹e„ ,Z´ˆ/¿ü’³gÏÏ´iÓX¿~}¾/³ErßOŠ mÚ´aÑ¢Eœ?ž?þ˜ýû÷Ó AêÔ©ÃüùóIOO7:¦ˆˆˆˆˆˆˆˆä%^b˘ÃúÑtô^‘ˆˆ-™3„X,ƒ“ˆÈÃ*ßV‘,"ˆ µø;g0ƒ9Á þÂ_°ÇÞèh…Æ­[·˜7o~~~tîÜ™råʱk×.¢££;v,îîîFG̵3f0gÎjÖ¬‰Ùl¦dÉ’ñ¿ÿýÏèh"O\É’%2dûöícÇŽT¨PRµjUæÏŸÏ­[·ŒŽ(""""""""èD'Ö²–•¬¤+]± /ÄŠˆHÑ¡BD >BDDDDDDD 
“œ¤=hIK*Q‰Ãf2“)II££*K—.¥F„††ÄñãÇYºt)FG{$)))¸ººf[^­Zµ,³C˜L&럙÷ÐÐPëó«V­¢aƘÍf¼½½5jÉÉÉY¶™¹ÞÉ“'yñÅqss³.ËÉ>î&'û½rå £FÂÇdzٌ§§' 22ò¾Û~P^€¨¨(Ú·o³³3ÎÎδiÓ†¨¨¨‡Ê M»víprrÂÕÕ•.]ºð믿Þ7碢¢h×®5O»ví²åÉ<†Ó§O‚³³3eÊ”¡oß¾\ºté¾Û‹‹Ëò÷õkײ,‹‹‹ËqÞü¤Aƒ,_¾œ£GÒ¸qc „¿¿?Ë—/7:šˆˆˆˆˆˆˆˆ ˜`Ö±Žïùžö´ç׌Ž$""’'T)øT)RH!Œ0jPƒƒdXÍj*PÁèh…JTTÁÁÁôìÙ“:uêpäÈfΜI¥J•ŒŽöXtìØ‘qãÆ‘””tßq™åŒŒ ë}æÌ™ÖçCBBèß¿?‰‰‰ìÞ½›ÄÄDFŒq×m :”Ñ£GsîÜ9Ö­[—ã}ÜMNöÛ¿J–,É®]»¸rå ëÖ­#&&†zõêåè˜ï•7&&†–-[Ò±cGbcc‰‹‹£oß¾„„„pæÌ™\ç°@PÍœ9“wß}___êÕ«G‡ ÁËË+WÛ¹s6‘%J0mÚ4|}}ï:öwÞ¡Aƒ´mÛ6˺¹•“ýnÞ¼™Ù³gSªT)jÕªÅW_}…OŽöq¯¼aaaŒ5Š×^{Í:¶o߾ܼy“>úˆéÓ§ç*gXXýë_yå•W²l/==]»v=0gXX£Gβ~hh(W¯^e„ |õÕWYÆ6Œ6mÚàääć~Hppð÷ãïïOzz:à¹çž`Æ ”/_Þú¸0ðóóãÿû#FŒ`Ĉ<ÿüó¼ýöÛüíoÃÎÎÎèx""""""""’GjS›ml£%- &˜ l 4¥Ž%""òÄh†‘‚O3„ˆˆˆˆˆˆˆäSÇ8FÚBMhÂQŽ2–±*ƒÌ~ëׯO§NذaéééT¬X1ÇE”{刈 gÏžÙ–·oßžM›6å:ç¦M›èܹs¶±™¥‰ˆˆ¸ëú]ºt!"""Ûò&MšdyìííM|||ŽöÕ¿¾øâ ëãÿûß¼ùæ›9Z·  dÏž=„‡‡óÉ'ŸP¿~}Ž?nt,ÉCU©Ê÷|O"‰4£ç8gt$‘'&³¢BD .BDDDDDDDò™Ë\fã¨IMH`;Û™Ï|ÊPÆèh…κuë¨W¯vvvüüóÏ >›Âÿv‰««+½zõbþüùÄÅÅa±X9rdŽÖMHH 44”§Ÿ~[[[L&&“éžãK”(ñX2çt¿K–,! €áÇSªT)š4iÂôéÓIKKËÑ~î•÷Ò¥Kx{{[÷›y/[¶,qqq¹ÎyñâEÊ–-›mùÝ–ýyŸ÷[ßÓÓ“‹/f[îææ–åqñâÅs\’éÓ§‹/&%%…ØØXN:E»vír´nAdccÈ#øù矱±±¡^½zlذÁèX""""""""’‡*Q‰øÛܦÍ8ã#‰ˆˆ<vvv+VL3„ˆ`…ÿ"""""""DÌaÏò,s˜Ãç|ÎüH­Pš5k:u¢K—.lÛ¶ ???£#ÂÑÑ‘©S§²bÅŠùå—qttdûöíX,222r\,x9ݯ««+áááœù„Ù³g“@bb"‹/æƒ>ÈQžñãÇÎÌ™3ùí·ßHHH`Ö¬Y„‡‡gÉó¸tèи¸8ºwïN‰%ûöó»÷ߟѣGÊž={ŒŽ#""""""""y¨4¥ÙÌf\q¥͈%ÖèH"""•ƒƒƒf)ÀT1ÈOüD L`a„±—½Ô¥®Ñ± µäädzôèA›6m˜4i’ÑqòÜ®]»¨Zµ*ýúõÃÝÝwwwLçÎùÛßþ–elxx8/½ôNNN >œÏ>û €ùóçc±X¨W¯ÎÎÎtéÒ…öíÛYgüÈüÙd2Ýs&{íãnrºßˆˆhÚ´)...Ô¯_‹Å¼yóî{n”×ÛÛ›eË–±páB*Uª„§§''NäÓO?%$$$×9}||X¿~=K—.¥bÅŠT¨PY³f±`Á‚lcïÆÏÏõë׳lÙ2|||¨X±"K—.eýúõøúúÞõ¸îu¼ `ccƒ‹‹ ¯¿þú}sf~ø!-Z´ gÏž\¿~Ýè8""""""""’‡JSš­lÅ‚â$'Ž$""òؘÍfBD 0BDDDDDDDòX )ŒcàŒ3ûÙÏXÆb‹­ÑÑ‹ÀÀ@Ö®]{×ç8ÀÓO?Mzz:ßÿ=˜Íf¼½½™5kV–ñÉÉÉŒ3???J”(««+-[¶dÍš5•í£>âúõëÌ™3ç_¸/ŒùòË/9qâ©©©¤¤¤ðÓO?ñúë¯g;Ý»w'66–ÔÔTNœ8A§Nððð`þüù\¸pÔÔT:D=ÈÈÈ ##ú~æã?/ÏÉ>î&§ûmÞ¼9ß|ó 
/^$55•ØØXÂÃÃqvv¾ï¹ÉI^???–.]ÊåË—¹ví‘‘‘téÒå¡rT¯^uëÖqíÚ5’““ùöÛo©V­Ú}3ÜÉßߟõë×síÚ5®]»Æúõëñ÷÷¿çq=hùýŽùòå4nÜOOÏæ*¬lll˜;w.—/_æã?6:Žä&“‰[·nñþûïãííMñâÅ©\¹2ÿþ÷¿³]½z58::âèèxßÿ¿yÜôöѹáÆF6ª"""…Žƒƒ‹Åè"òTÉCkYKUªò%_ò9Ÿ³…-T¦²Ñ±«7ÞxƒiÓ¦Ýõ¹O?ý”×^{ [[[Ž;F·nÝ5j¿ýö‹/fÒ¤IlݺÕ:¾ÿþ¤§§Á•+W8uêo¼ñŸ~úi®sY,¦M›Æ˜1cxê©§öðDŠ“ÉDdd$S¦LaÔ¨QFÇ1\éÒ¥3f S§NÕ#b5lØ0nܸADD‰‰‰üç?ÿaêÔ©,Z´È:f÷îÝ 0€‘#GÇ©S§xýõ×yùå—ùñÇ L/""""""E‰^Ã>:7ÜØÄ&ÊR– ‚ˆ!ÆèH"""ÌÁÁA3„ˆ`*„ˆˆˆˆˆˆˆä \ ýè@êQccC0Qøf©èÖ­GŽ!:::ËòK—.±lÙ2† À¤I“7n½{÷ÆÅÅ…€€¦NÊG}d]gÓ¦M„……Q¡Bììì(Uª:tàÛo¿Íu®7rýúuú÷ïÿh(RÄ4jÔˆ&MšP«V-££ä ýû÷'99™M›6Eò‰2eÊ0iÒ$|}}qtt¤iÓ¦|úé§L:Õ:fÊ”)üãÿ OŸ>”.]úöíËĉ™}šÊ•+c2™¬÷R¥Jo³páBðõõ¥jÕª 8•+W’‘‘‘ëLñññ”+WîÑNDŠâ#JPÂèXyÆÝÝ_|‘/¾ø€•+WR¶lY¬c\\\8wîYî·nÝ²Ž©X±"ß|ó —/_fáÂ…Ô¯_ŸI“&šëLÎÎÎ\½zõÑNDм«W¯âêêjt É'llü6{™2eøí·ß²-ÿí·ß([¶ì“ˆ%""""""’^Ã>~®¸² øàC0ÁDmt$‘\3›ÍX,£cˆÈCR!DDDDDDDä1ºÀBáU^e ÙË^êPÇèX†xã7˜1ciii|úé§Œ1"ËóAAA¬\¹2GÛ*^¼8Ï=÷C† aýúõ,Z´(×y|||8~ü8iii¹^W°ÎâbÄ~s3öÏw777Y¼xñLY°œ8q‚Ž;âîîží|Ƀ¥¥¥qüøq|||ŒŽ"HíÚµY½zu¶å«V­¢víÚ$¹;½†Í=W\ÙÄ&ªP…`‚‰"ÊèH"""¹¢BD 6BDDDDDDD“%,¡:Õ9Ä!¶°…éL§8ÅŽe˜5jP¥JÆÏ‘#GèÑ£G–çÇÏĉ™7o—.]âúõëlÞ¼™öíÛ[Ç4iÒ„ÿþ÷¿œ9s†ôôtΟ?Oxx8Mš4ÉužöíÛsùòe6oÞüÈÇVeddá23fÎ6“––FTTÆ cĈ|úé§'ÌúõëGóæÍ‰Í2;ä̦M›¸råJ–«Dä­·Þâ½÷ÞcÁ‚$$$À‚ øûßÿθq㌎'""""""b¥×°ÇGÖ²–jT£9Í9Ä!£#‰ˆˆä˜ !"› !"""""""(‰$^æezÒ“®tå iBî …Ño¼Á‡~ÈàÁƒ±··Ïò\õêÕY·n‹-¢bÅŠ”.]šI“&ñæ›oZÇLœ8‘+VðüóÏãììLãÆ¹uë_ýu®³x{{ĤI“ù¸¤`°µµÅËË‹~ýúñ¿ÿý©S§)_Ø·oÇÇÅÅÅè(NFFï¿ÿ>-Z´ |ùòFÇ‘¤AƒÌž=›iÓ¦Q¡B*T¨À´iÓ˜3gFDZÒk؇çˆ#kXCuª«"""ŠÙlÆb±CD’ !"""""""`=ë©A 6³™Õ¬æ ¾À '£cå-Z´Àl6óÚk¯Ýõù矞uëÖqõêURRRøî»ïhÞ¼¹õùfÍš±téR.^¼È78qâ“'OÆÕÕõ¡òLž<™;v0wî܇Z??Zµj 6Äl6ãííͨQ£HNNÎ2Æd2a2™8}ú4!!!8;;S¦LúöíË¥K—²m3::švíÚáää„««+]ºtá×_ÍQž+W®0jÔ(|||0›Íxzz2`À"##³Œ‹ŠŠ¢}ûö8;;ãììL›6mˆŠŠÊ–ûÎü&“‰ÐÐÐÜœ«€€Î;—myNr8p€V­Záè舋‹ mÛ¶åÛo¿Í¶­víÚY·Õ®]»,Ûºó8¶nÝ ÀÂ… ³,Ïíù1™LœsæÌ‡:O‘‘‘x{{gY–ÓǧgÏžŒ3†„„vïÞÍõë×iÓ¦M–mež·“'OZKmÚ´!&&€ôôt*UªDbb"Íš5 W¯^œ;wŽR¥JqíÚµ‡:?C‡eôèÑœ;wŽuëÖÝ÷<üùœ>HN®¯?_3§NbàÀôìÙ3Ë> ºŸþ™‘#G2nÜ8jÕªetɧœpbë¨NuZÒ’Ã6:’ˆˆÈ}™ÍfBD3¡r= IDAT 0SFaùDVD$˜0±ˆEô 
‡ÑQ¤0™L,Z´ˆ=t=‰ˆÜéË/¿dÈ!Fǹ§Mlbƒ¸ÉM¾äK:¡ßŒög&“‰%JкukæÎ‹‹‹‹Ñ‘¬ÒÓÓiݺ5ÑÑÑ|÷ÝwT«VÍèHURR¾¾¾Ùfþ0™L¬X±‚ë²cÇŽÌÙ³g­Ë^~ùe^xáÞxã,ëÏ;—>ðËýÎÎÎüòË/”*UʺìÔ©SøøøX×íÛ·/5kÖd̘1YÖ={6`úôéYrçæí«;Çߺu‹ .°yófÆŒc-(eÊiŽ>}úжm[^~ùeë˜cÇŽQ¥J•,ÇT·nÝlç-<<œŸþ™¯¾ú €wÞy///†n3yòd~ùåf̘ñPçgË–-Ö‚InÏQN–ßén××½®™… Ò»wïBQ‰ŠŠ¢yóæÔ¬Y“ 6P¬X1£#‰ÑL&X´ôz^DäöíÛ@:u N""’;ú÷KD Ì×®‹›£J!…v´ãGØÊVªRÕèH"""wÎÔ©S9}ú´ÑQD œ¼|ÿà^ß7Õ !"""""""9”Fa„ц6Ô£QD© r\¿~åË—ç«2€­­-+W®¤råʳk×.£#=Vnnn$&&Þõ¹&MšdyìííM|||–e›6m¢sçÎÙÖ½s6Œû©_¿>:ubÆ ¤§§P±bÅ,¥€ˆˆëìwjß¾=›6mÊÑ~îÇd2a2™°µµÅËË‹±cÇòÕW_e)ƒä&Çwß}GóæÍ³ŒyöÙg³ÓÝÎ[—.]ˆˆˆ°>8p sæÌÉ2föìÙ <8×¹2d[ö¤Üíúº×5Ó²e˼ŠõDíØ±ƒàà`ªV­ÊŠ+T‘)A V± o¼iE+b‰5:’ˆˆÈ]i†‘‚M…‘8Æ1 $œpf0ƒ%,Áw£cÉCrrrbݺuÔ­[—   fÏžmt¤‡’@hh(O?ý4¶¶¶Ö"Ľ¸¹¹ey\¼xñl³7\¼x‘²eËf[÷nËîfÉ’%0|øpJ•*E“&M˜>}:iiiÖ1—.]ÂÛÛÛš7ó^¶lYâââr´ŸûÉÈȰޓ’’øàƒ2dH¶mç4ÇÅ‹qw¿ÿÿÞïuÞ<==¹xñ¢õ±ŸŸ%J”ààÁƒlÛ¶ j×®ë\™J”(‘ƒ³’{9½¾îuìO=õÔÉ•—fΜIóæÍiРk×®ÅÑÑÑèH""""""""R€¸àÂF6R–²DqFGÉÆÁÁA…‘L…‘˜Ï|^àl°aû£#ÉcàääÄÊ•+yóÍ7 ¥{÷îY¾¸_¼üòË8::²}ûv,‹µñ(ÜÝÝ9þ|¶å—.]ÊÑú®®®„‡‡sòäIbcc>|8 .¤wïÞYö‘˜˜˜¥¸‘yOIIy¤üV²dI À€?~|–çršã©§žzàµáîîžm¶€øøøle’2kÖ,fÍš•evÜäzÒrz}Ýëš¹Û²‚"!!®]»2dÈFÍòåËU‘‡âŠ+ëY .´¤%ç8gt$‘,°X,FÇ‘‡¤BˆˆˆˆˆˆˆÈ=\å*}èÃð ¯°ƒøágt,yŒllløàƒظq#{öìÁßߟ¯¾úê‘KyeçÎL˜0ooolmm¹0ЪU+V¬X‘mùæÍ›s´¾ÉdâÌ™3ÀïEž={²víZ6nÜhÓºuk¶nÝšmÝ~ø!ËL™Û{† ÆÊ•+ILLÌuŽfÍš‘ęV­šõq‹-X¾|y¶m}óÍ7´hÑ"˲îÝ»³råJذa}úôÉò|nÎÏ“”Óëë^×Ì·ß~ûÄ3>nÌ›7öîÝKDDï¿ÿ>66zUDDDDDDDDž;îlf3öØDç)¸¿LEDD ³ÙÌíÛ·¹yó¦ÑQDä!è“L‘»ØÃjQ‹ïøŽµ¬e:Ó±ÇÞèXò„´hÑ‚ƒÒ¹sgH£FˆŒŒ4:Ö5hЀ1cÆÇÍ›7‰‰‰aèС´Í°°0>ùäfÏžMBB‰‰‰,^¼˜>ø ÇÛ %::šÔÔT.\¸ÀÇœ¥ÆøñãYºt)—.]"99™5kÖлwïl³xxyy±k×.ÒÒÒØ¸q#Ï<óÌC—»»;;vdΜ9¹ÎñÞ{ï1qâD"""¸~ý:ä•W^aĈÖ1ãÇ'<<œ™3gòÛo¿‘À¬Y³ÏvLNNNÓ·o_:v숳³s–çss~ž¤œ^_¾f.]ºÄ×_ÍþóŸ<Ëú8ìÙ³‡† 2hÐ ºvíÊÁƒ 6:–ˆˆˆˆˆˆˆˆxðßaƒ ­hÅ%r6+³ˆˆÈ“æààÀ7 N""C…‘;ÜâS˜Bcã‹/ûÙO[ÚKò@É’%™1c?þø#666Ô«W8`t´{š?>‹…zõêáììL—.]hß¾=ufÌŸÿ<ÛÆÝ–ûøø°~ýz–.]JÅŠ©P¡³fÍbÁ‚wÝÆŸEDDààà@Ó¦Mqqq¡~ýúX,æÍ›gãííͲeËX¸p!•*UÂÓÓ“‰'òé§Ÿ’e{ááá¼ôÒK8991|øp>ûì³{îû^Ç™iذaŒ=“ÉÄÅ‹sœ#s昰°0ÜÝÝéСÝ»wÏRŽðóócýúõ,[¶ *V¬ÈÒ¥KY¿~=¾¾¾Ù² 872dÈlÏå4×ǛәTrs-äôúúó5S±bE,XÀ¼yó ÄÌ?ýô;v$00[[[öîÝËgŸ}†«««ÑÑDDDDDDDD¤)C¾å[®r•6´á WŒŽ$""b-„X,ƒ“ˆÈÃ0edddBD¤ 1ab‹èA££H!`2™X´h=zèz¹Ó—_~y×/ÈŠø°…-ç8éL*©FG‘",s†7nœDD† 
!""""""R¤¥’Êp†Óƒô¦7?ò#þøKò “ÉD§Nøá‡صk+Väµ×^ÃÓÓ“ÐÐPvîÜitD‘|aÍš5¬^½šòåËãááAhh(ƒæ­·Þ2:ðûl ¯¼ò žžž 6 ???"##Ùºu+:tÀdRPDäÿ³÷çñQÖ÷þÿÿ˜,d$Ê–„E!‚Ö E° w±jÅ¥ ZTzz<­Ý{ì¯êÑs[OÅj«(j«µ‚þê†Ö* Zi¡uƒObd3,ÖLÖùþ3%@‚$W&yܽÍk®¹æâyÍLðzgÞ¯ë%I’$Ij]Gr$/ñïð—r)5ÔI’ÔAÅ B¢ÑhÀI$Œ´ H’$I’”Õ¬æb.fËx‚'¸˜‹ƒŽ¤6lÔ¨QÌ™3‡òòræÎËý÷ßϘ1c2d—\r S§N¥   è˜R Î>ûlÎ>ûì cÔ³~ýzæÌ™ÃìÙ³ùàƒ6l7ÝtS§N¥gÏžAÇ“$I’$I’8–cy8Ó¸š«y˜‡í^.Ijuáp°Cˆ”¬ì"I’$Iêæ1/ñ%¶²•·xËb5YVVÓ¦McéÒ¥,Y²„ &p÷ÝwsÄGðÕ¯~•™3g²jÕª cJÒ§Ÿ~Ê]wÝŸqãÈËËãÖ[oeìØ±üýïgÙ²eüð‡?´D’$I’$ImÊhFó4OóOp=×G’ÔÅ;„X"%' B$I’$IJ 5ÜÌÍ|¯q6g³„% cXб”¤ŠŠŠ¸çž{())áñǧwïÞÜ|óÍôïߟãŽ;Ž[n¹…wß}7è˜R»ö÷¿ÿŸýìg{ì± 8[n¹…ÜÜ\žxâ JJJ¸ûî»ùÒ—¾tLI’$I’$©Q§r*ó8¿æ×ÜÊ­AÇ‘$u0ñ‚h4pI#-è’$I’$µ–lä|ƒE,â7ü†iL :’Ú‰p8̤I“˜4iÕÕÕ,\¸gžy†|0Q rî¹çrê©§ròÉ'Ó­[· #KIkëÖ­¼ñƼüòË<÷Üs¬^½šüü|Î=÷\~ñ‹_pÊ)§žžtLI’$I’$©Y.àf3›oòMºÐ…ïòÝ #I’:ˆp8 Ø!DJV„H’$I’:„…,ä|ƒL2y›·Áˆ #©JOOçÔSOåÔSOåž{îaéÒ¥<óÌ3Ì›7_ýêW¤¦¦RTTĸqã?~ø€?ü?ü—^z‰ÒÒRzôèÁˆ#(,,døðá 2„Áƒ“““䡨*))aÅŠüóŸÿL|?øà6mÚ@Ïž=1b#GŽäÊ+¯døðáŒ1‚H$prI’$I’$©í‹áyžg,c9•Sy“7½–$©ED";„HIÊY1’$I’¤va›¸˜‹ùã·ü–)L :’Ô":wį̂Q£5jT½õ udxøá‡Ù±côíÛ—Öë&2pà@ú÷ïOJJJ‡£$PVVFqq1ÅÅʼnÏXqq1}ôÛ·o¨×±æôÓO·c$I’$I’tˆt§;/ðcÃD&ò¯Ñ….AÇ’$µ3ápØ!R’² D’$I’”ôþÎß¹€ H#·y›á :’Ôê²³³9餓8餓ëb±Ÿ}ö+V¬¨w{ýõ×™={6;wîv™ 4ˆPPP@~~>}ûöM,çææ’žžÔ¡©UUUQRRÂÚµkY½z5k×®eíÚµ¬ZµŠO?ý”+V$®•™™É Aƒ8âˆ#˜0aÿöoÿÆ Aƒ4h}ûö% |4’$I’$IRû”K.¯ð cÃÅ\Ì3ôìÙ“ž={Ò£GRSS[ù(ÕÚÚZ6mÚDii)¥¥¥¬[·ŽuëÖ±fÍÖ¬YÃÚµkY³f ëׯ'‹žžNnnnâý8qb¢àcðàÁôéÓ'࣒$I’$I’:®A âyžg<ã™Æ4f3›^¤E’thX"%/ B$I’$II©†nâ&îà~À¸ÛIʼnèRsäää““ÃW¾ò•}«««cýúõ¬^½šÏ>ûŒ5kÖ$:H¼óÎ;<õÔS¬_¿žÚÚÚÄsB¡P¢0¤gÏžôîÝ›^½z%ÖõéÓ‡ììlºwïNVVVâ–’’Òš‡tjkkÙºu+ååå”——³uëVÊÊÊX¿~=¥¥¥lÚ´‰ 6°qãÆÄýÒÒÒD¡@jj*}úô¡_¿~äåå1f̘Är¼È§OŸ>¾’$I’$IRv'ð$Or.çÒ~üŒŸI’ÔN„Ãa¢ÑhÐ1$ B$I’$II§”R.æbÞáæ2— ¹0èHR»“’’Bnn.¹¹¹n‹Åê lܸ‘ 6$îoذ÷Þ{/qÓ¦M î§k×®tïÞ½Á[VVÙÙÙ¤¥¥ÑµkW:uêD—.]‡ÃD":wîLFF™™™¤§§Ó­[7RSSÉÊÊ"jÝ«ãÅb1ÊËË©­­eÛ¶mTWW³cÇ*++ÙµkD£QvîÜIUUÛ·o§ººº^¡ÇÞ·òòrvìØÑàß/¼‰ß ˽zõJãÄ rZûõ$I’$I’tèÉ™ÜÇ}\Ã5ô£WreБ$Ií@$¡¬¬,è’‚!’$I’¤¤²„%œÇyt¦3oñ…Iê°B¡P¢ð )jjj…{A4V ±bÅŠÄv555õŠ,š+;;{Ÿuñâ’=Å»jì]</âØ{ÛòòòfgÙ³x%---Ñ)¥{÷îôéÓ‡!C†$Šaö.މ¯ËÊÊ"-Í_íI’$I’$IÑU\Å'|Â4¦‘K.§qZБ$II.‰PRRt 
IÁo%I’$IIc.sù&ßä+|…'x‚,²‚Ž$©ÒÒÒèÑ£=zôøÂûŠF£TTT°k×.*++Ù±cÕÕÕlݺ•ºººzW0ª©©aûöíûì#þÜ=ýþ÷¿à²Ë.«·>##ƒÎ;ï³®]»Ö+ÌÈÎÎ&%%…îÝ»“žžNfff⹑H„p8ü…Ž[’$I’$I’nçvÖ²– ¹¿ðŽá˜ #I’’X8&CÒA° D’$I’ÔæÅˆqwð~ÂÕ\Í=ÜC:éAÇ’ p8L8n°óDZtéR~øÃÒýJ’$I’$IÒ¡"ÄlfSB gqoñùäK’”¤"‘AÇtR‚ I’$IÒþD‰2…)ÜÄMÌd&÷s¿Å ’$I’$I’$©ÃK'?òGçpÎâ,Ê):’$)I…Ãa B¤$eAˆ$I’$©Í*¡„“9™y‘?ógþƒÿ:’$I’$I’$IR›Ñn<ÇslbpUTI’”„"‘Ñh4è’‚!’$I’¤6é]ÞåDN¤œr±ˆñŒ:’$I’$I’$IR›ÓŸþÌcïðÿοG’”„ì"%/ B$I’$ImΓ<ÉÆpG±˜ÅÅQAG’$I’$I’$Ij³Š(âqçaæNî :Ž$)ÉØ!DJ^„H’$I’ÚŒ1f0ƒK¸„˹œyÌ#‹¬ cI’$I’$I’$µy™È fð~Às<tIR‰D"v‘’TZÐ$I’$Iˆå ®àižæ7ü†iL :’$I’$I’$IRRùßãc>ær.çMÞd#‚Ž$IJáp˜ÊÊJêêêHI±ß€”Lü‰•$I’$®”RÆ3žWx…?óg‹A$I’$I’$I’Ò=ÜÃHFr.ç² AÇ‘$%H$@4 8‰¤æ² D’$I’¨Oø„“8‰õ¬g‹Ǹ #I’$I’$I’$%­tÒyЧH' ¸€J*ƒŽ$Ijã,‘’WZÐ$I’$io={ödÖ¬YAÇP+(î]̯Oÿ5‡o;œkÿ|-©ø á/AÇ’Ôøÿ!Iû5 ˜?>ÅååAG‘¤6oÛ¶m\|ñÅAÇ$I’:œÃ8Œçyž9‘k¸†ßñ» #I’Ú°p8 @EEEÀI$5—!’$I’ÚœóÏ??èjOñ7pgp…£óäÎAG’$æÏŸÀ´iÓN"©MûÖ·˜0a\tQÐI$I’$I’uG1‡9œÍÙ e(?æÇAG’$µQñ!„HÉ'%è’$I’¤Žg&3¹˜‹¹†kxЧèŒÅ ’$I’$I’$I‡ÚiœÆÿò¿ÜÄM<Ã3AÇ‘$µQñ‚h4pIÍe‡I’$IR«©¡†ë¸Žx€»¸‹ë¸.èH’$I’$I’$IíÚu\Ç2–1…)¼ÍÛ ehБ$ImL8ì"%# B$I’$I­b;¸˜‹y×yš§™ÈÄ #I’$I’$I’$u¿âW,g9çqoó6YdI’Ô†Ä;„X"%Ÿ” H’$I’Ú¿5¬a4£ùÿàu^·D’$I’$I’$©¥“Î\沓\Â%ÔRt$IR/‰F£'‘Ô\„H’$I’ZÔ2–1šÑĈñoQDQБ$I’$I’$I’:œ>ôá)žb! 
¹•[ƒŽ#IjCÂá0`‡)Y"I’$Ij1oñcKy,d!I’$I’$I’$©ÃÅ(f2“ÿæ¿yЧ‚Ž#Ij#222HII± DJB„H’$I’Zij<ËxÆ3†1¼ÆkÎáAG’$I’$I’$Iêð¾Å·¸š«™ÊT–±,è8’¤6"FƒŽ!©™,‘$I’$rñr!—r)Oñ"AG’$I’$I’$IÒçîå^ŽåX.à¶²5è8’¤6 ‰Ø!DJBiA$I-'z’.Iju3Ã3¹5r+?ˆþ€Vüíl:’¤v¦{÷$ÿuN<_—’W6°sçNªÊÊ‚Ž"I’¤fŠD"„Ãá cH’¸tÒù`$#™ÂžáB„‚Ž%I P8ö»+) Y"IR;vß}÷Ñ·oß cH’:ˆºP³›Í«^嚥×PT\Ä|æKR;S\\Ì©§žÊqÇt”/Ìóu)yM>øàÖdfE’$IÍôÙgŸqà 7C’¤6¡/}y’'Ïxf0ƒñ£ #I’‰DˆF£AÇÔL„H’ÔŽuîÜ™I“&C’ÔD‰r9—³ˆE<˳œStJR{´téRb±XÐ1 Ï×¥ä6jÔ(Fù3,I’”tfÍštI’Ú”“8‰éLçû|Ÿ"Š8•SƒŽ$I H$±Cˆ”„,‘$I’$}!›ÙÌD&òñ*¯2ŠQAG’$I’$I’$IR}‡ïð7þÆd&ówþN.¹AG’$ Û!DJB)A$I’$%¯ÏøŒS8…Jx“7-‘$I’$I’$IJ2!B<ÄCd“Í…\H5ÕAG’$À!Rr² D’$I’tP>åSNáê¨ã/ü…£8*èH’$I’$I’$I:]éÊ\æòïñS~tIRÂá°!R² D’$I’ÔlËXÆIœD6Ù¼Îëä“t$I’$I’$I’$}ÃÎ,fqwð4OG’ÔÊ"‘Ñh4蒚ɂI’$IíJ(" ã r´•ìâèÍGs$Gò*¯Ò“žAG’$I’ÔmeÌÑ®ÇKI’S’$I’s—1õóÿŠ):Ž$©E";„HIÈ‚I’$Ij%íabЋ¼¯‹àOü‰ntk±¿«=¼^’$I’šÆóÿæñõ’$I’Ô’îá2 ¸€ œ,IE8¶Cˆ”„,‘$I’$5Éøçqüÿ¯C˜pБ$I’$I’$I’tˆ… 3‡9¬d%ßá;AÇ‘$µ;„HÉÉ‚I’$IÒý†ßp9—óm¾ W5A'’$I’$I’$IRKÄ åQf1‹ßòÛ ãH’Z!Rr² D’$Ij£B¡¡Pˆ]»vñ­o}‹=zеkWÎ?ÿ|V®Ušp! IDAT\Ùà¶\wÝuôîÝ›´´´ÄãUUU̘1ƒc=–Î;Ó¹sgŽ=öX~þóŸS]]]o_555Ìœ9“‘#G’™™IçÎ;v,Ï?ÿ|½í{ì1ÆŽKVV 4ˆüàlݺµÞ¾~ýë_sâ‰'Ò«W/:uêDnn.^x!óçÏoò6£¶¶–Ûn»þýû“‘‘ÁСCyøá‡ܶ)Çðæ›o2yòdòòòHOOçðÃç´ÓNãå—_>`žP(ToyÏ[s³¿öÚk„B!rrrüeLee%yyy„B!^}õÕfÛSCïÿ€Ù¸–k¹Û™ÉLˆ5|| KC5å=oêëÕÔ÷î@?#’$I:´ÏÜx¦9çÕ{®kÎø/ÊñRCŸ¯†ŽÏñ’$I’¤d4‘‰ÜÈ\˵,cYÐq$I-,FƒŽ!©¹b’¤f!FlNlNÐ1ÔN±9sZîótÿý÷·Ø¾%µyŸ}ÄoãÆ‹UUUÅb±X¬ªª*vÚi§5ºm,‹ÕÕÕžño4ºMaaalëÖ­±X,›:uj£ÛÅ÷×”mæukl¿sçÎMlÛœcÙsß{ßB¡Pì™gži0ÇžÛÐ6MÉ>zôèûÅ/~±Ïkp÷ÝwÇ€ØñÇ߬׮Á÷1j‰ñ­}sîïx÷÷XSÞó=~°ï]C?#’’Ë’%KbK–,i±ýOš4)6iÒ¤Ûÿž<_—’Äb-8žoÏ|±ñLSk΢¹Û;^ª¯)Ÿ¯†Ž­±u=æxI’Z^SÇb-=þ”¤V3iÒî›tªbU±1±1±¡±¡±±AÇ‘$µ ÿþïÿŽyä‘AÇ’Jkþþžoj‡I’$©[ºt) .dûöí,X°€‚‚Ö®]ËŒ3öÙö­·Þâ…^`ûöíìÀ]wÝÅo¼AVV=ô7ndãÆÌž=›nݺ±`Áîºë.î¾ûn^~ùe233™9s&kÖ¬!ò׿þ•óÎ;€Ù³góøãÓ·o_æÎKii)»víâ¯ý+Ç<Ë–-ãþçxâ‰'¸ùæ›Y»v-UUUlÞ¼™çŸžÓN;­ÉÛŒW^y…矞­[·²zõjÎ=÷ÜÄ1Æ5çXÎ<óLþô§?Q^^NUUk׮嗿ü%±XŒÛo¿}¿yâïG|yÏÛÁdÿéO ÀwÜÁÎ;ë+++™>}:?þñ›üzÅ÷ÿïšyWo¿šÔ¤rSñMœ·á¼fíkšòžèõjî{×Ðψ$I’ZŽã™ÖÑ”1Ds·w¼T_S>_‡‚ã%I’$ImM:é<Á”RÊu\tIR ‡Ã vÝ•ÔÆµX Š$µSØ!D‡Tl*^qXJn|~EÎçž{®Þú§Ÿ~:ĆºÏ¶üã÷Ùψ#b@ìá‡Þç±|0ÄŽ=öØX,‹sÌ11 
6kÖ¬Fs}ùË_ޱ7ÞxcŸÇ>ùä“}zb]üj·C‡ÕÕÕ5úw5$þþß÷À}±©±©±N±N±§bO5š³©Ç¶÷cM}Ï÷·Ïæ¾wûû‘”\ì"©MÀ!âxæà4ç¼zÏuMC8^úâã¥ý}¾;ÇK’Ô¶Ø!DR‡c‡"/Ä^ˆ…b¡Øoc¿ :Š$©…ÜsÏ=±ž={CJ*m¡CHèó%IM"ÄæpEí@(bΜ9\tQË|žf͚ŴiÓZdß’Z^( ¬¬Œ¬¬¬Äú²²2;ì°zWfˆo»e˲³³ëí'‰F)--¥Gõ+--¥W¯^D"víÚEçΩ¨¨`ãÆôìÙ³Á\]ºta×®]¤¦¦ÿº2il«‘¦§§SUUÅ_ÿúW.»ì2V®\Yogœq¿ýíoéÕ«W“¶iŽøkQQQA8N¬Åb¤¤¤ÔËÜœc™3g—^zé~¯’ºçcñZw°Ùž}öYÎ;ï<?üpV®\IZZGq%%%<òÈ#\qÅfmHçΩ¨ªà¢ñløYæ0‡¯ñµFs6õØö~¬©ïùþöÙœ÷nÏ}5ô3")¹,]º€¢¢¢ÙüÜ|îܹ-²ÿ=y¾.%±PæÌÏ·Žg¾Øx¦)çÕ{®kêÂñÒ/àóu0ÇæxI’Z_SÇb-=þ”¤V»¶Âï»Ôþ}—ïr?÷óï0”¡AÇ‘$b=ô×_=Û·o:Š”4Zó÷Í7Miñ¿Y’$IR«i‰ñÉ#µµµÔÖÖRWWG]]]½I(ÕÕÕŒ=š?þ˜E‹qÇwpá…’™™ÉK/½Äõ×_ßämÆž„à_\öXn½õVb±ÿöoÿÆÿýßÿQQQA,cÇŽñ‹d8÷Üs1b›7ofæÌ™Ìš5‹’’ øÆ7¾Ñü¿¸0æeÌcó,iLc“‘j'{(Þóæ¼w{rr“$IRÛåx¦yçÕ{jê¢9Û;^:t/I’$Ij/¦3Œà".¢‚ýU%IÉgÏ ùHJ„H’$ImÜ_þò—z÷_ýu Фçy䑼øâ‹û<6oÞ<† RïÏgŸ}¶Ñý°xñâÄÕEºÅ¥¥¥1zôh¾ÿýïóä“OòÞ{ïðòË/7k›–Мcùä“O˜>}:GydbÒ«¯¾Úä¿/> ¨¦¦æä…BÜtÓMÜyçLŸ>€ï}ï{¤§§7k_»ØEÚ‹iðUøÏyÿÉ&4ëù½{÷Øç*¶{~ãšòžïïõjîçP’$IÁp<Ó<Í=¯nIŽ—êkÊç«1Ž—$I’$µé¤óOPB ßá;AÇ‘$b‘H„ÚÚÚ/&"©í² D’$Ijã®»î:^ýuvìØÁÂ… W8qb“ž¿òé 7ÜÀï~÷;6mÚĦM›xä‘G¸ñÆëm3yòdn¼ñFî½÷^>ûì3*++yë­·8ÿüóø÷ÿw`÷ÕV|ðAV®\IEE•••|ôÑG<ðÀŒ=€“O>™Ç{Œµk×RSSCii)O=õÑh´ÉÛ´”æK~~>wÜq[¶l¡¼¼œ'Ÿ|’iÓ¦5ùïëÙ³'O>ùä!;¶¯ýë :”²²2Ö­[GÏž=¹úê«›µìä\Î¥æ¸8 î½ìÞFßÿÆ 6 Øý9[³f Û·oç…^h0KSßóý½^Íyï$I’Ç3ÍÓœóê–æx©¾¦|¾ãxI’$IR{R@ð³˜Åïù}Ðq$I‡P$Z~®†¤C,&IjbĿĿCí›3§å>O÷ß‹í[RËb@ìüóÏO,Çoyyy±Í›7ï³mC¢Ñhì+_ùÊ>ûˆßÆŽ«¬¬ŒÅb±XUUUì«_ýj£ÛÆ]wÝun³ç¶û{|Ú´iMÞæ`^·¦>ÖÔc¹ãŽ;|lÊ”) î·¡uW^yå~_׿f{ôÑGßvÛmûöR+ŠŠõ‰õ‰½[ýn“Þÿ†²¼øâ‹ >gêÔ©>ÿ@ïù^¯¦¾wzý$%—%K–Ä–,YÒbûŸ4iRlÒ¤I-¶ÿ=y¾.%1ˆÅZp<ß8ž9¸ñLsΫôÚŠí;úxiOMý|9^’¤¶¯©c±–JR«™4i÷M:Ä®]ËŒeÆþûgÐQ$I‡È‚ b@lÆ AG‘’Fkþþžoj‡I’$©{ôÑG¹êª«ÈÎΦK—.|ík_ã7Þà°ÃkÒó322xå•W˜>}:#FŒ ‰D1b3fÌàÏþ3:u ==?ýéOüüç?OlÛ¥KÆŽËsÏ=—ØçÝwßÍüùó¹ð éÛ·/éééD" ¹ñÆy÷ÝwøÛßþÆUW]Å€HOO§GŒ=šû￟_ÿú×MÞ¦%5õXn¼ñF~þóŸ3xð`2228p ·Ür ³gÏnòßu×]wñío›~ýú‘žž~ÈŽaðàÁtíÚ•k¯½¶ÉÏÛÊVNçtÖ°†×ycÒŽiÒûß3Ï<“‡zˆ£Ž:Šôôtòóó¹å–[˜5kÖ>Û6õ=?ÐëÕÔ÷N’$IÁq<Ó<Í9¯n y¼´·¦~¾âxI’$IR{t'wr$GrÅ+ÉKR{ïRQQpIÍú¼ZD’ÔD!BÌaqQÐQÔ„B!æÌ™ÃEµÌçiÖ¬YL›6­Eö-©å…B!wîÜÙÿž<_—’X(sæ@ çÛÇ3j+f¼$Ijßš:kéñ§$µšøØµ~ߥŽg+(¢ˆ+¹’™Ì :Ž$é zÿý÷9æ˜cøç?ÿÉ!C‚Ž#%…ÖüýAcóMí"I’$IIlåÊ•¼ð Àî‰NMa1ˆ$I’¤Žà`ÆK’$I’¤¦Ä 
à~ůxš§ƒŽ#Iú‚Âá0`‡)Ù¤@’$I’ö'~eáéˆWÞóµ9묳6lØ~· ;ðg /0†­ØýœŽøúI’$I-ÍñLpj¼ÔßI’$IjÜE\Ä+¼ÂU\EEPt$IÒAŠD"€!R²±Cˆ$I’$%±p8ÌgœÁâ#ê¨K¬«¦šläðÏÿ“$IÒ!tÄ0bDÓijjà’KZ?“$I’$I™2RS÷]ŸžW\ÑúyÔ¡Mf2—qWqØtIÒ„Ãa;„HIÆ‚I’$IJ2Ó™N*û~‘SC ò!'s2›Ù@2I’¤v¬¡É4¡Á€Ád’$I’$©!—^ µµû®¯©I“Z?:¼{¹—L2ù&ß$F,è8’¤ý°Cˆ”|,‘$I’¤$²„%,bµ4ðE»;…,g9§p ¥”¶r:I’¤vìÒK¡®®þºÔÔÝ…"’$I’$µ%ùù0j¤ì15,%e÷ºþý‹¥Ž«Ýx‚'˜Ï|~ͯƒŽ#IÚ B¤äcAˆ$I’$%‘Ì ôýnSGËXÆ‹¼ØJ©$I’:€œ3¦þdšº:¸è¢à2I’$I’Ԙɓww¶ŒKIñ¢ ÔñÏù1ßã{|ȇAÇ‘$5"FƒŽ!©,‘$I’¤$ñ)ŸòGþH5Õ >žF_âK<Çs\Á­O’$©ý›<ù_Ë©©0v,ôîXI’$I’5iRýû±\pA0Y¤Ïý”Ÿr,Ç2…)TQtIRì"% B$I’$)Iü/ÿK*©û¬¯;†cxŽçx‡w˜ÈÄÖŽ'I’Ôþ]xáîB¸= D$I’$IjKzô€ vcSSw/÷êt*upi¤ñ±‚ÜÌÍAÇ‘$5À‚)ùX"I’$II` [xëuIù|Hw$G2—¹‚H’$µ´ìl8ýt… %Î??èD’$I’$5î²Ëvw‰Åv/KmÀÁÜÉ f°€AÇ‘$í%FƒŽ!©Ò‚ I’$IíI]]7n¤´´€ž={Ò«W/RR¾X=þ½ÜK 5ÀîB1ŽäHnåV.äBB„¾pvI’$XùÙg“5o»Æ#-¡SÐ$I’$Ií^MM Û·o ¬¬¬ÁmêêêØºuk½u)pLZ!àÝþý©[º´ÞãÝ»woðû‹P(DVV]»v%-Í)f:´®á^æe¦0…÷yŸl²ƒŽ$IúœB¤äãÙº$I’$@ee%¥¥¥¬_¿ž 6PZZʆ X¿~ý>Ë¥¥¥ÔÕÕÕ{~JJ ={ö¤gÏžôéÓ‡Þ½{ï³Ü»woúôéCÏž=ÉÈȨ÷ü(Qîâ.j¨!Dˆ! ávnç<γD’$éÚ±c«V­bõêÕ¬]»–5kÖ°jÕ*Ö¬Y“¸¥VVR L}ùeþ“““CAAùùùäååѯ_? ÈËË#??Ÿ>}ú}X’$I’¤V°mÛ6¶lÙByy9;wîdçÎlݺ•íÛ·'î———³cÇvìØ‘¸_UUÅÎ;‰Åb”——'öU[[KEEžB÷#ŸÿyåÉ'¡ý„Ãa"‘iiitíÚ€¬¬,B¡]ºt¡S§Ndee‘™™I—.]ÈÌÌ$++‹.]ºÐ¥KºvíJ÷îÝë=vØa‡%ö¥Žç7ü†Œà?ùOåÑ ãH’>‡-‘’Œ!’$I’:¤;wÖ+ðØßrü ˜¸Î;×+à0`'žxâ>…@£û...N,ïÚµ«Þþ³²²ûèÝ»7ÛÎÞÆ–+·Ð{ko¦|4…s«Ï%§w»úì¢K—.­öšI’$%³êêj>û쳋<â÷÷<ïëÚµ+0xð`ÆŸ¸¿5'‡ÿ_y9ßÜëùo¾ù&øÃX¿~}b?äçç'nýúõK,Ç÷—™™ÄK"I’$IjĦM›Ø°a7ndýúõlٲ倷ššš÷ÕµkWºtéBçÎÉÎÎNHdff2`ÀÒÓÓ-°ÈÈÈ sçΤ¤¤Ð½{w ñ®ݺu#55µþÊX €OBõ/0U[[˶mÛÜÏžmݺ•ºº:víÚEeeeƒ,Û·o§ººšòòr6lØ(€)++cçÎìÚµ+ÑádoééévØa‰[vvv½ûñ[ü"[½zõ¢G îKÉ¥=x„G8ƒ38‹³¸”KƒŽ$Ibw‡/Z*©uY"I’$©Ý¨¨¨`ݺu”””PVVÖèrIIÉ>Eáp˜ììlrssÉÉÉ¡ÿþŒ=šœœœzëãËM5tèÐ&åÞ_Þ/l$ÿ|jž­áÎuwòóºŸï“»¡Œ{/÷éÓ§Ñ/‰$I’’]ü©w>‡üûã÷óóóIOO?4/”$I’$uPuuu¬[·ŽU«V±víÚÄE›Ö­[—(üX¿~=7n¤ºº:ñ¼ÔÔÔ Ž8âˆ×ïYô/âÐn[·n­W,ÒXqÍæÍ›ùøãë­‹é:uêDÏž=ÉÉÉI‰äææÒ«W/úôéC^^ýû÷÷{‘$p§ñïŸÿ7†1Pt$Iêð"‘ˆB¤$cAˆ$I’¤6->±oëÖ­cíÚµTUUÕ{n|b]¼0¢°°°Á‚‰‚‚‚@[’G""‘¹¹¹5¾á½PYYÉæÍ›÷ûz,_¾œ’’JKK÷¹"Yvvö~‹FâËN:”$ImI4¥¤¤dŸ"øòG}TïJ£ñsžxQÅ„ êÝïׯ߾WL=„:uêDnnî~ÏïâÅÌ 
¯¼ù曬\¹²^'¹ììì‹Eâ÷û÷ïï$I’$IZEEÅÅŬ^½šÕ«W³fÍV¯^ͪU«X½z5Ÿ}öY¢Ð#%%…^½zÑ«W¯DQÁСCÉÍÍMÄ;y÷êÕ+à#k?ºwï~ÐE27ndãÆ‰"žÒÒRJJJذa%%%,Y²„ÒÒR6nÜH]]°û‚yyy‰ŽñNñ®$‰ÊCÔAø¿à Þàr.g H¥å~g#I:°p8lAˆ”d,‘$I’ÔªšRÐ_Þ» ¡S§N~øáõ:b 8°Á‚†‚‚ÒÒÚß'###1¹°°°ð€Û¨ féÒ¥”••±fÍš}Úµï]PÓØr~~>ݺuk©C–$I@YYYƒ…ñûëׯ'‹õ»iäääPXXÈ´iÓ…Gyd Å¾M‰DE‰¿.{¿&Ë–-cþüù¬\¹21É%##ƒ¾}û6Z02hÐ ¯L+I’$)éUUU±víÚÄØhùò剱Ҟc¤=ÇŽGuÔ>!ÛëwíY¼€çè£>à¶ýžá¥—^JÜ‹_€aذa&>#C† !33³%IŸ æ·ü–QŒâ—ü’ïñ½ #IR‡‰D¨¬¬$‹ …‚Ž#© ÙH’$IúÂ***šTàQVVƆ _ÈÀî/eö.0h¬“‡­½›/^<ÓzãW«Ž¯ßSCïccË999þâH’¤$^ ºwWøÄŒÕ«W×+ŽOÄ8p cƌ٧¸¡#KdggSTTÔh—‘ÊÊJ>ûì³_ßùóç³bÅ ¶nÝšØ~Ï Q uqB”$I’¤¶bûöí,[¶Œ>ø€>ø€eË–ññdzvíÚÄäÄüü| Ä Aƒ8ýôÓË^Ĩƒ;ÐxzÛ¶m¬^½š+V°bÅ >þøcV¬XÁÂ… ë}Æòòò{ÅÅÅÌ›7oŸ ‰°ïg¯±åüü|ÒÓÓ[ê%Iêâ]Âö.$ˆ/úé§Äb1`wqBß¾}…&L`Ú´i‰ûƒö ­ˆD",iì}^ºt)óæÍcåÊ•‰ß½ß罋G|Ÿ%I’$íOII K—.MÜþú׿²eËÒÒÒ(((`ذaL™2…ÂÂB† ÆÐ¡Cd¯V‰D>|8ÇïÛ¡"þù]¾|9Ë–-ãÏþ3wÞy'•••dffrÌ1Ç$:“5éûýK )<Â#Ã1ÜÊ­ÜÆmAG’¤)‰» B¼À§”,‘$I’’Xee%›7o>`GII 7n¤¶¶6ñÜŒŒ ;ì°zëØà„û^½zy¥f}!ñŽ Mq î4K—.¥¬¬Œ5kÖ°}ûözÏÝ»;McËyyytïÞ½%U’¤¤±¿ÎÅÅŬ^½ššššÄö{vŽ(**²sD;’«¾{i IDAT˜¬Òýu‚™?þ;Áì]øàÞyç/^Ìüùó¹çž{¨««#77—N8Q£F1vìXŠŠŠÀpwp-×r:§ó¾t$Iêpâ!Ñh4à$’šÊ3LI’$©©¨¨hRGYYëׯO\‘vO†ß{â{aaaƒâûôéãUµÔ&5å*Öqúy)..æÍ7ßL¬ßSC?/-çää8ÁU’”Tö7ݺu¬X±‚­[·&¶UÆÿúˆíÛ·×Û_CÝEâ÷ûõëg¡®$I’ÔÖ®]Ë3Ï<Ã3Ï<Ã믿Njj*ãÆã¿þë¿7n#FŒp*íeèС :”k¯½¶^7W^y…iÓ¦Q[[ËØ±c9ï¼ó8ï¼óèÛ·oБ•K.÷p—qgs6grfБ$©Ãˆw‰F£'‘ÔT„H’$IŸkJ÷€øòÞÝ2228ì°ÃêMø8p`ƒÁ{õêEZš§âR[¿ÊuS¨ ÐÒ¥K)++cíÚµlÛ¶­Þs÷îÔØr^^žWo—¤v ªªŠM›65ZðñÉ'ŸP^^žØ~Ï. 
ñNq{Þ·¸PíIFFFâ³Ý˜xÑîÞ?CÅÅÅÌŸ?ŸÕ«WSSS@zz:=zôh°ÓÈž÷%I’$XYY¿ûÝïøýïÏ’%KÈÌÌ䬳Îâ±ÇãÌ3Ϥ[·nAG”’FJJ Ç{,Ç{,7Þx#Û¶mãÅ_äé§ŸæÇ?þ1×]w'œp—]v“'O&+++èȸ”Ky–g¹†kxŸ÷9 ;¨HRk°Cˆ”|œ…&I’¤v­¢¢¢Ieee¬_¿žX,–xn8ÞgBvaaaƒµsrr…B©¤ E"‘N^Œ;пKÅÅżùæ›Mþw©±eÿ]’¤`Äÿ-ohÂúºuëX¹r%uuutêÔ‰¼¼¼Ä¤ô &0yòäÄ„õAƒY (í%^´ÛXÇ·êêjJKK?{þ,.Z´ˆ’’’zçX{]5T0Ò¯_?233[ó%I’¤6eÑ¢EÜÿý<ù䓤§§3iÒ$n¾ùf¾úÕ¯’‘‘t<©]èÖ­—\r —\r Ñh”ùóçóÇ?þ‘Ÿüä'üð‡?dÒ¤IL›61cƵÕÝÇ} g8×s=òhÐq$©C° DJ>„H’$)éèJüñå¦\‰?~•e¯Ä/©µD""‘¹¹¹íwÛ¦t.Z¾|y“;5¶lç"Ijšøyhc«V­bçΉíãç›9992qâÄz“Îû÷ïOJJJ€G$µ?éééäææî÷\+&ºŠìý³¼téRV¯^ÍŽ;ÛÇ‹mê.2pà@úõëGjjjk¢$I’Ôâ¢Ñ(?ü0÷Þ{/Ë–-£¨¨ˆ™3gr饗ҵk× ãIíZ8æœsÎáœsÎá®»îâñÇgÖ¬YœtÒIríµ×2uêÔS•E³™ÍœÁD&rI’Ú½ŒŒ B¡Ñh4è(’šÈÙ’$IjÊÊÊXà±nÝ:Ö¬YCuuu½çÆ'çÄ'7ýìÝy|SUÞ?ðOº¤I÷”.Ié%hÊ"-`a) ‹30Q´2¢¢0Zü3 ó<*ó(Ê8ŽâÌð8²ª£)*ÐÊb[e tO›Òt¥{ïï&yš6iRhzÓöóæÕ7É]¾'7¹9÷Þó=Gc±Ñsxx8ÜÝÝE*!QדØBCC­ö‚Ý–­c©V«ÅÁƒí:–Z› ƒT*uT‘‰ˆDÓØØˆ²²2‹£z ÓüíGÐh4f Å£££áéé)b‰ˆÈ™Lfst7c½ªý1A«Õ"-- ¹¹¹¦d\©TŠXM1N9»ÆÆFüíoßÿüg $''cË–-6;¶!"ÇðõõŲe˰lÙ2œ:u ›6mÂÊ•+ñ_ÿõ_HMMŲeËúÅõú©˜ŠÇð–c9îÁ=PB)vHDD}šD"‡‡G!êE˜BDDDDaO¯öÆi{zµ‰‰±Ø@9$$„=±ý‡B¡€B¡°k^[£-effÂ`0 °°•••f˶mÉÚôÀáïïu™Á`èäÑvº³ÆÝ‰‰‰f» Äã9Ì”)SpèÐ!±Ãè÷Œõ*kI¹MMMÐëõ“ÈÒÒÒLÓFí“ÈÚ'DEEÁËË«§ŠGDDDDÔÁ¾}û°jÕ*ã·¿ý-~÷»ßA©d£kgs«çŒ]]¾»ÏQyÎ{óFÑ£G㥗^Â믿ŽÕ«Wã¯ý+Þxã $%%‰žÃ½7†4¤ û±_ìpˆˆú<¹\ÎBˆz&„‘ÝêêêìJð0N·%“É:4V«Õ«T*H$‘Ji]]]T*Fމ£GöÈ6Û¾‚ Øœ×Ò<ÆuØZžº_vv6žzê)œ:u 555¬ï±÷Ÿ˜Ÿ“žÞ¶£·gïú¿úê+¼ýöÛ8zô(ZZZ‹'Ÿ|=ôPår¹Íž±lÿµZ-ÒÓÓa0 ÓéÌÊnéøomZ©TÂÅÅÅ‘Å&¢>ª¾¾ÞÔs¿¥ýóòòL¿ÅÀ†ÞÆFØ–>"##EO:fݳsÎXwé®÷¤µµõ#융8Åv+ûTŒz•»»»i$7k½%wvlÊÌÌDnn.jkkMó·=6YmÄŽMDDDDÔ÷\»v O>ù$vî܉E‹áÕW_ExxxmßRÝßßC‡ÅÊ•+ñÀôX,]Õ•{ÝåVÏ--ßÙyâÍnÏÚ9ž£Ïy퉡· ÅúõëñÌ3Ï`õêÕ˜={6-Z„¿üå/;<‡ñ†7>ÂG˜ˆ‰Ø‚-x‰QŸ&—Ë9BQ/„"""¢~ÎVñÆéüü|TWW›-Û¾‡ø˜˜ÄÇÇwhà???‘JØ}>þøcŒ;çÎÃÅ‹ëðm ‚pË ¶Œë ž·dÉ<ÿüóøüóÏqìØ1L™2¥Ëëè©ý'æç¤§·íèíÙ»þ{ï½#FŒÀ_|áÇãâÅ‹X¾|9òóóñ‡?üÁañÝ ¹\¹\ÞiƒG£††\»v­Óß”œœ»Gˆ²6 77^Ö ê:ë…¿í´Qû^ø5M¯ì…ŸuOçØNwl»«ó>|øfÂêõneŸ:k½J&“ÙLÀµ6z‘V«EZZZ§£µOáèEDDDDÔU.\@RRªªª°oß>QFh.ÚÜÜŒ’’>|O=õJJJðÔSOõx\öèŽ{]u«çŒ]]þf·gí¯'Ïyûúý±ˆˆlß¾=ô}ôQŒ=À°aÃÄÍa&`žÆÓX˜ŒÉGÏ%¯õ72™Œ !D½ˆDèkiÐDD&;±Ày{"¡ÞC"‘`çÎëÙfÓ¦MHIIqȺɹ »FðÈÏÏGSS“Ù²Æ:l5Æ ‡»»»H%Çĉ±bÅ |÷ÝwpuuÅ«¯¾Ú#Ûµ·Q^gó9k/À}»»;êëëíêÅ×öŸ˜Ÿ“žÞ¶£·gÏú% Ξ=‹#F˜ž»|ù2âãã¡Ó雳±÷7«  
fËÚû›©T*R ©»eff€Í䤛e¬›ïÚµË!ëo‹õõŒÇk mC»»»#00Ðjchãt_Àº§mÎVwq†÷ÄÎK{7[_®W566¢¬¬Ìê1òòå˨¨¨0Íß6)ÎÒ12::žžž"–ˆˆˆH|öž‹9úü“Hl§OŸÆäÉ“qÇwà“O>A`` h±X;HKKCJJ ´Z­QÙÖ•{ÎÌQç‰Îpþé 1ô„ÒÒRÌ;ÙÙÙ8räFŽ)vHs×q'îD$"ñ¾€}7釈HLÇǼyóðÒK/‰ ‘ÓëÉëÖÚ›º8|ËDDDDtËêëëQTT„ììl¤¥¥aë֭ذaV¯^Å‹#)) qqq …««+0|øpL˜0‹/ƺuë°{÷nhµZ( h4¤¤¤`óæÍØ¿?222PXXˆææf”——#;;éé騵k6lØ€µk×"%%IIIHHH@LLL¿K¹té~þùgÌœ9<ò¶lÙ‚æææóI$H$üôÓO7n<==1aÂ\ºt ÙÙÙ;v,¼½½1iÒ$äååu[|ÆŽŒÛ—H$xôÑGÍæÉÏÏÇìÙ³áããƒ<øàƒ¸víZ‡ueeeá¾ûîƒ|||0mÚ4deeÙGVVf̘aZvÆŒ¦e9bŸQMM |||PTTdzÎÍÍÍ4ß‘#GÌÊÖå0®óòå˘;w. E‡øºZζënnn6•eõêÕV×ç,ûÏZYíYßþýû™L†¨¨(<ûì³F€ììl̘1ÞÞÞðóóÃý÷ßoõ»àÈ}hm{¶ö+œ={S§N…——|}}1}út|ñÅV×;xð`SL ¦çA0k´ @CCC—cïÍ Ôj5°`Á¬X±k׮ůqàÀ¤§§ãòåËhhhÀõë×qùòe;v û÷ïÇk¯½† ˜.¬dffb÷îÝHMMŬY³0a 4Ëå4h””„Çk׮ņ °{÷n¤§§#;;ƒAäw„¨o©¯¯7õr¿uëV¬[·?þ¸©þêããcª»N™2«W¯ÆP\\Œ˜˜¤¤¤àƒ>À¡C‡pùòeÔÕÕ¡¨¨صk^{í5¬X±Ât,è+É ¬{Ú_wéîíØ[§i¯»Þkõ{⯬¬Ä³Ï>‹˜˜Èd2¨T*,Y²'OžìRœ–âéÊûlO}ª»ë„}¹^%•JM£´-X°©©©Ø°avíÚ…ŒŒ  SéСCذa’““­V‹Ý»wãÙgŸÅ”)S0|øpxyy! qqq¦:‘ñšAzz:´Z-Z[[Å.69Xqq1¦M›†Ñ£GãСC¢&ƒtf̘1f÷Œì=w³çZîÍž—vvÀÞ{]½¶méõ®œ·Y[¾íkmÏ­Ås+çÎÖâoÿ'“ÉLót×}k×êíÙÆÕ«WÍâ»téjjjÌž»zõj—cê.ÁÁÁ8|ø04 ¦M›†’’Ñbq4Oxâ#|„¯ð>ćb‡CDÔgÉårŽBÔ›DDÔ% ìvŠõ„;÷yÚ¸q£ÃÖM·îúõëBaa¡‘‘!ìß¿_زe‹ðÚk¯ O?ý´œœ,$&& Æ T*•ÀìO&“ *•JÐh4ÂÌ™3…ääd!55Uxë­·„-[¶‡²²²„ÂÂB¡µµUì¢ö «W¯~÷»ß™ÇÇÇ ûöí³8/árrr„ŠŠ aùòåÂØ±c…yóæ™žûío+ÌŸ?ß®mÛ[mïl>ÂÔ©S…Ï?ÿ\¨®®òòò„ K–,1›ïâÅ‹‚R©Þ}÷]¡´´T(++¶mÛ&ÄÄÄùùùnÿâÅ‹BXX˜ðþûï %%%BII‰ðÞ{ï .^¼(‚ ddd¾¾¾Bss³i¹>úH8p ðæ›oš­ï¶Ûn222l–ãþûï¿¥r¦L™"?~\¸~ýºðÙgŸuú^ÚSζ붗ØûÏR ]}7nÜ(ÔÖÖ ÅÅÅÂâÅ‹…‡zÈlžK—. 
ááá¦÷N¯× Û¶mî¾ûî[ÞvWö¡µ²Ú³_ÏŸ?/ :T8tèP[[+dgg &Lè°¾¶322„¨¨(áܹsÆ$‚°råJá¾ûî³9ÙÖ•ßY‰DrÓ¿³---bµßÉÈÈèðûÐ,X ,X°Àaëo«/Ô×ËËË…ŒŒ a×®]Â[o½%¤¦¦ ,4M‡ï—L&bbb„ÄÄDÓ÷jãÆ¦ïTuuµØÅq¬{Ú_wéîíØS§±VþîxO,­ÇÞøgÏž-¬]»VÐétB}}½ðÃ?ñññÖSìÑ•:¸=õ)GÕ Ûc½Êœñx½ÿ~aãÆ¦ãu||¼#¸¸¸˜Ž×R©Tˆ‰‰âãã… ˜êA»ví222„ŠŠ ±‹CDDtKì=sôù'‘˜æÏŸ/ <ØiÎÅ­§:tH:t¨Åùm»Ùs-·»¯©×Ù•{·zmÛøœ=çm-ßÕíu×¹sûÇ ÂèÑ£…wß}W„îÝGÖ®ÕÛ»sçÎ £G6[¶µµU|·Ývà£>ž={°oß¾óGµøÅ/~(**ÂÀÍž+((€F£±«‡{‡³îl>‰D‚½{÷böìÙ¦ç®\¹‚„„šž{ðÁ1bÄ<ÿüófËðÁ8{ö,6lØ`uû>ø F+V˜=¿~ýzœ>}Û¶mƒ P©TØ»w/ÆHLLÄêÕ«±fÍ?~À^±P\\lÖ[”¥rœ?“'O¾érH$|ýõט8q¢Õ²uµœm×mï)—ØûÏR ·²>ƒÁ€Áƒ›õ@–œœŒ¸¸¸ïÝG}„‡~ø¦·ÝÕ}h­¬öì×E‹aúôéHNN6ÍsþüyÜvÛmfë3®?''÷Ýwvî܉1cÆXŒ%//GÅÛo¿+W®àûï¿Ç Aƒì. ݺ††\»vÍæïwQQôz}‡^ú …ÍßïÐÐPDDDÀÍÍM¤RöŽr×X7ßµk—CÖß–³××uÛââbhµZhµZ³Çyyyfß…B˜˜ÄÄÄ@¥R!44Ôì±J¥º©œúÖ=»Vwqôv,ÕiŒÛn_þîxO,­ÇÞø}||››‹€€³mÄÄÄX¬§Ø«+up{êSŽª¬WÝŠÆÆF˜çÛN_ºt •••¦ù×0¬÷ÃÃÃûÝÈ¢DDÔ{Ø{.æèóO"±hµZÄÆÆbÏž=˜5k–Øá0?OiiiAII >ŒçŸëÖ­ÃâÅ‹;]ÞÒ¹›=×r»ûšºq]¹‡p«×¶ÏÙsÞÖÙòÏÚ:‡ì®sgxâ‰'PQQíÛ·è¾}ÔÙµú®lã®»î‡~ˆ‘#G>ÿüs¼þúë8|øp§qô¤={ö`þüù¸t颣£ÅÇaÐ 4PA…/ñ%$àuG"¢î4uêTDFFâ½÷Þ;"§×“׬µ7eBQ1!„ºBz«W¯";;z½:%%%Ðëõ())N§ƒ^¯‡^¯Gkk«i©TŠ   C©T"88ØêtPP\]]E,!uæÀX·nÒÓÓMÏÕÖÖ"<<999P*•fóK$TUUÁÇÇÐÚÚ WW×Ϲ¹¹™}f¬é®FyåååP(¦çZZZàîînƒR©Ä‰'i¶|II &Mš„œœ«Û·¶ì•+W0~üx–,Y‚ØØXüþ÷¿Gaa!æÌ™ƒS§NáÞ{ïÅæÍ›õë×ãܹsøè£l–£¡¡r¹ü¦Ë!‘HP[[ OOO«e»™r×Ý] !ŽÞ–bè©õét:¨Tª›ÞvW÷aWbk¿_•J%~øá„††Ú\¿V«Ebb"6oÞŒI“&u:odd$æÍ›‡çž{®Ã1…œKKK ôz=JKK¡ÓéPZZjuZ¯×›%€º¸¸ ((AAAP*• APPBBB T*µZ¨¨(ñ Ø 0!¤ûÔÖÖâøñãÈÏÏG~~>rssQPP€üü|äåå™ Œ°°0„‡‡#22ááᦿÈÈH¨T*¸¸¸ˆV–¾„uÏ®Õ5Ĩ#uå¹®Æji=öÆ?uêT\¿~/¾ø"­&!ÞLBÈ­ÔÁ-Õ§Q'4ÆÊz•ã”——›~#òòòŸŸ‚‚äææ"//EEE¦DAWWW(•JDEE!<<Üô…‘#GvاDDD=‰ !ÔßmÚ´ Ï?ÿ<®]»æ4÷¦,u ¡R©°uëV$&&Ú½Žöç¶®å:â|±«÷nõÚ¶ñ9{ÎÛ:[þVBº²ÞÎÖ÷Ïþ/¿ü2N:oooݳl]«ïÊ66lØ€óçÏãoûà¾ûîÃO<ûÓ8zRss3 €7Þx>ú¨Øá8Ô œ@<âñwü¢o—•ˆ¨§Íž=>>>øÇ?þ!v(DNÏBØ-%‘’’’••eõu///Œ=#FŒÀðáÃ1bÄÜvÛmLôè#6oÞŒ¥K—š=çåå…ùóçã£>ÂêÕ«;,cl|ÀÔ8²ýs=›ÝöFp£Nû®]»fµ!²\.ïtýeee\©T*”••™OŸ>7nÄïÿ{lß¾‹-üú׿Ǝ;ð /àÀøíokW9<<C&“¡¾¾^ì0ˆÈNL!"""²ÃñãÇ‘››k6:Hûé‚‚œ>}ÚjOàm{ÿ¶6íáá!b)ÉN‡Ï>û 
û÷ïÇ#<ÒáõØØX‹òz«ÀÀ@ääät¸q`ï²ÅÅÅ.š›Ýt™:u*–.]Šºº:üóŸÿÄgŸ}˜7o~ùË_bÙ²eøþûï±gÏQÊaϺí)§º»Üö®/99C‡Ezz:ÂÂÂL½Q·ïÕ-00:®C/[퇑ïʶ»‹½ûuÀ€(++³9B¬]»±±±˜9s&öíÛ‡±cÇvwØÔÃ,Ö,M[9lèСÖØ[6õ¤U«V!))ÉldÜÜ\Óˆ!»wïÆ¶mÛLó+•JSˆ0¥RÉQBnëžânÇÞ:MO³7~???¬_¿ëׯGYY>Œ·Þz ÇŽÃÇÜ#qÚSŸræ:!YWQQÑáw¢íãÂÂB455¸q-D©T"..ÎìwÂø»1dÈ‘KCDDDÔ¿ 8hhhpÚûRþþþX²d òòò°fÍlÙ²Åôš½çnö\ËuÄ9‡3ßCèŽ8w®­­Å¼yó°nÝ: >ÜìµîØG¶®Õwe!•Y IDATAAAˆÇŽ;ðÓO?áé§ŸýºA{õõõ(((@XX˜Ø¡ôˆ—ñ2>ŧx# iÀ¹öQo%—Ë¡×ëŃˆìÄ„"""";øúúâŽ;îÀwÜasÞºº:£¨¨ƒ¡Ãô©S§LÓƒÁlY™L…BÐÐP¨T*›Óäx[¶lÁÃ?ŒM›6Y|]­Vã›o¾Á=÷ÜÓÑuÔœùË_âÈ‘#¸ÿþûÍž?vìV¬X~øÁ겉‰‰øä“O°råJ³ç÷ìÙc6¤»B¡À¨Q£ðÎ;ï˜@7z›U*•X¿~=âââàïï/J9l±·œ]%öþ»•õ}ûí·Ø±c‡Ù>»~ýz‡õM:{÷îÅŠ+Ìž?|ø°ÃËb‹½ûuâĉHKKÃâÅ‹MÏ={ .ì04ýÂ… ÿøÇ?0wî\9r±±±¶ÝÓ=¶“¹ºº:‹¿×–¦KJJÌ’<Œ¿ÛÆßåÐÐP¨Õj‹¿Ýl(OÎÈÕÕ·ß~;n¿ýv«ó ¡¸¸Z­Z­EEEÈÎÎÆ››kê ÒÝÝ ELL bbb R©LÓdëž7t÷ï}w×i¬qT{ã—H$ÈÏÏGXXñ«_ý S¦LéÐÉQqÚ[Ÿrdõª›ÓØØˆ²²²ÇzããË—/£¢¢Â4¿L&3Ûo¿ývLž<ÙìXÝm½Q÷›6m°sçN³ëœÎè‰'žÀàÁƒQ^^Ž€€öŸ»Ùs-×祎º‡àH]9O¼ÕsgK–-[†±cÇbÉ’%f1 ‚Ð-ûÈÖµú®n㡇Âÿ÷£ººgΜéjqnÇŽhjj´iÓÄ¥GxÀ[±ã0›° ãq±C""êär9êêêăˆìÄ„"""¢n&—ËM àl±§jff¦]P;K qº¡¿{‹>øÀ¬gìö–.]ŠÍ›7;E£¼â»ï¾C\\¾þúk,]ºùùù]ZÇÚµk1kÖ,´´´`Ò¤IJ¥8zô(–-[†wÞy§Óe׬YƒI“&Á×׳fÍ‚D"Áþýû±~ýz9rÄlÞ3f`íÚµ;.\¸K–,ÁÚµk»ww–Ö®”³+ÄÞ·²¾ñãÇãùçŸÇÿûÿ¡¡¡ÈËËßþô'‹ë›8q"|||””WWW¤¥¥á•W^qxYl±w¿¾øâ‹¸ÿþûŠ»ï¾—/_Æ#<‚§žzÊ꺱~ýz̘1ééé 1½‰D‚ôôôn/Sf+9Ó8]TTdÖ¸蘜©R© Ñh˜œIý–±¾©V«-¾ÞÔÔ½^o±qZZšiÚ¨m#â¶É"ÆÇQQQðòòê©â9Ö=ó{ßÝukºã=¹•øàÑGÅo¼Áƒ£¢¢6lèÐàÈQqÚ[ŸrTõ*ë C‡$¶Û&÷I¥R 0Àt|NLLDrr²éñ Aƒn)yŸˆˆˆˆÄ§R©ððÃãùçŸÇ´iÓ,vHV")) ~ø!V­ZÀþs7{®å:â¼ÔQ÷©+牷zîÜÞÆqöìYœ8qÂâëݹ¬]«ïê6fΜ‰Ç{ Ë–-sºdxN‡ÔÔT,]ºÔÔ![pîªÿüKD"aØ!õz2™ õõõb‡ADöˆˆ¨K @Ø)ì; ê#;w:îó´qãF‡­›z^CCƒPXX(dee ‡¶lÙ"¼õÖ[Bjjªœœ,Ìœ9Sˆbbb777€ÙŸB¡† &ÄÇÇ ,ž~úiaÍš5ÂÆ…ýû÷ ÇŽ._¾,466Š]T§ÑöýÛ³gO‡×?ÿüs³yÚ/ci==g+[víÚ%DGG R©T\عs§Åõeee Ó§O¼¼¼oooaêÔ©BvvvîCkóÛûùýöÛo…øøxA&“ áá᫯¾jöºŸŸŸÙú Ͷ¹mÛ6Ó¼ãÆÆoWÜý]yy¹••%;vLصk—ðÖ[o kÖ¬RRRÌ~¥Ri§¿3gÎRRR„5kÖo½õ–°k×.áØ±cBVV–PUU%v1ÉBFF†ÃÖ¿`ÁaÁ‚[[ý¥¾^WW'\¾|ÙT‡}íµ×„””!11Q6l˜àííÝá;«ÑhLß××^{Mزe‹pèÐ!áòåËBss³ØErÖ=»§îÒukÛèŽ÷äVâOKKæÌ™# 0@J¥Btt´ðì³Ïvø}³gw¼ÏöÖ§º»N(ý·^eëëååeók¬õåc,Q[öž‹9úü“HLÂàÁƒ…;ï¼S(//5[ç¢ß~û­éu½^ߥëѶ®å B÷œ—Þ̹Qw]ÛîŽsήœÏÞʹ³¥ç<<<:¼íç¹Ù}Ô•kõ]ÙFSS“%ÙŒ¡'•—— #GކÚ/¯u× õ‚ZP ÷ ÷ ­B«Øáõz/¼ð‚0jÔ(±Ã 
êzòú`¹½©ä?/‘$`'vâ< v(ÔH$ìܹ<à˜ÏÓ¦M›’’âu“ó³·wôüü|TWW›-kì¹¹³QG ÂÃÃáëë+R ‰ˆ¨7khhÀµk×lŽ”UTT½^ææfÓ²°k”¬àà`¸¹q€Ô¾$33 Ñh²~cÝ|×®]Y[¬¯ÿK½×·î¬÷zK£( ‘KDDtë:…©í´Ga"""²½çbŽ>ÿ$Û•+WpÏ=÷ÀÛÛû÷ïGll¬Ø!õ »víÂÁƒ±uëV±C19þÛÆÏWLLŒÅÏ]HH‡R'""º ÆßÚÎ’ ƒÅ‘Ž?Ž¢¢"èt:‚À< mˆqzÈ!ðññé©â‘Œû×ZÂÇÕ«WÑÚÚ àF’ÑÀMû311)))¦ÇƒæèjDDDDDdF¥RaÑ¢EX´h ¹¹çÏŸGff&233ñå—_bݺuhmm…† ‚aÆA­Vcذaˆ‹‹c#zr ƒÙÙÙÈÉÉ1ýúôi\»v L"Ìš5 qqq¦Æ´$¾P„⼂§ñ4æa4°<â.ÝÀBˆz&„õ3r¹ÜfOÑFuuu6ä×jµHOO‡Á`0kü_c~{H”J%‡†ïEl}.ÚO·eés¡V«-~.ØÃ8‘sP(Ðh4Ðh,ß(¯¯¯7J×~´‘œœ\¸pÕÕÕfë³4ºˆñqdd$=»‰1ÜRBOQQ®^½Šëׯ›æ7Ž£R© V«‘””d¶¢¢¢Xo'""""¢[âææµZ µZÅ‹ªªª••…ü?þø#²²²ðé§Ÿ¢¼¼ T*1|øp 2±±±2dŒèèhŽMݪ©© W®\ÁÅ‹M.\À?þˆ’’@@@FŒµZùóçãŽ;îÀðáÃáëë+rôdË2,ÿð/,ÅRœÂ)¸ƒÇ""kŒ !!„¨w`BY%—Ë!—Ëjµ QCC®]»Öir@NNŽÅ‘ <<<`WIpp0ÜÜx*ÓÝlcœÎÏÏ7kÐ t9ÆØ Vû}Î"DDD}L&³™pl0,Ž@¡Õj‘––†ÜÜ\SýÐÝÝGiû¸¿kllDYY™Õ„Ë—/£¢¢Â4ÛÑ[Œõµ¶ïqtt4<==E,õW¾¾¾?~<Æoö|QQ²²²pîÜ9äääàìÙ³øøãQZZ àFrIdd$bccMÆŽ"""x=š,ª¬¬D~~>rss¡ÕjqáÂ\ºt /^Dnn.š››ÁÁÁˆÅСC1mÚ4Sâ¯Iô^Hð.ÞÅHŒÄëx/à±C""rZÆQ®8BQïÀVTDDDDDÔ-<<<ŠÐÐP¨Õj›óZK>Ðjµ8xð ÐØØh¶¬±Wi[ $aaaJ¥Ž*²Ó³õ§óòòL78ŒÚ¿ÇÆâ{Î؈ˆˆÈ&…BaÌ’¦¦&èõz³Ñ+ŒÓÇ7=6j›Ü`)a$22ÞÞÞ=U<‡0ŽÈg)‘¦¸¸W¯^Ekk+@*•bÀ€¦÷!11ÉÉɦǃ ‚¿¿¿È%""""""êã=‡©S§š=_UU…‹/šñ_ºt رcôz½i>???„‡‡#22áááGDD"##•JejìH}C]]t:)á#??ßôwõêUäç磪ªÊ4PP)™(!!ƒFll,Ì„¢>*±XóŸ³1Ã0L숈œ’»»;ÜÜܘBÔK0!„ˆˆˆˆˆDalh[£Wdffš’Úö‚ t½"t`(TÊŽ $ÆÇά®®ƒÁf‚GQQJJJL ïCÛrªT*ÄÄÄXL¨ «««ˆ%%""¢þÆÝÝÝÔÐÇÚÈtõõõ¦ÄáöI™™™ÈËËCMMi~c‚«¥ÑEŒ=ÆŠUç1Öo­%|äææ¢¶¶à`·TЬÈHÔ  µZ¤¤$³òDEEÁÅÅE”²õ4___h4‹ç555ÈÍÍE^^ž) 77?ýô¾üòËPùùù™F'7^ Ahh¨ÙsLI}}=ÊËËQRR‚ââb”––šîƒ´®m²‡T*EXX˜))èÎ;ï4%G‘éíIÐÍY…UøãQ<Št¤Ã¼¦BDd‰L&C}}½Øa‘˜BDDDDDNO.—›ñÙbLš°”$‘瞇);0àŨO¯·+iÂÚ´R©ì–Fw¶’]ÚN ³eñ¶O­V[Œ[¥RA"‘Ür¼DDDDb‘Éd6ë„Æ$áö Z­iiiÈÍÍEKK €Ž£jXmÄÞæ¶QVVfqTãã¶õºö£h4³x¢”Jx=ó ænß,Züñ“?ˆˆˆˆˆˆ,òöö†Z­¶:B¥ (..F~~>JJJ Óé ÓéPZZŠÂÂBddd˜ž»~ýºÙ²žžž€B¡@@@€Õ?x{{ÃËË ^^^P(ðòòê·#š766¢¶¶ƒµµµ¨­­EMM ***P^^ÞéŸÁ`°¸Œ÷i‚ƒƒqÇw 88J¥J¥!!!ˆˆˆ€R©ä}²È nxï#qxïb9–‹‘S’Ëå!„¨—`Bõ)r¹r¹¼CïÒßàÌÇ|Ä {wíE$"ÑÐЀÒÒRÓÍžöÓ………8}ú4ôz=JKK!‚i}R©AAA¦› AAA R©4M@II ôz=t:iÚx“I¯×›õD&‘HŒ   Ó͋ѣG›¦ƒƒƒÍ¦=<˜?ˆ‹víÆëžuQÞÞÞ2d† b÷2Ƥ‰šš³‘0¬=®­­5%j\»v 0ªªªÐÒÒbJÀè %¨G<1&¹Ï·Û&º´O|i;rŠñ1Gð 1ýÀìÁr,ÇgøLìpˆˆœŽ\.ï±zÝ&„QŸÕ€<'ð!>ÄñG¬ÁHps7\]]MÃßqÇ6çoœ3 
Ý»÷¦¶GDDDDâðó󃟟†nöü œÀ8Œƒö‚^z/øùùõüˆmwÝœ:üæ7ÀĉÀºuÀŠ=Y%‘HL£Wö”––³Ñ.ÛjllÄ#<øàƒ •J-ÎçëëË«¨ßñ€ÞÇûH@¶a’‘,vHDDNE.—s„¢^‚ !DDDDDÔ'é¡Ç,ÀüpÛg IDATø{°³1»G·oí¦ õN:èJ‰ò`¹x |þ9ðç?+WÀÆ€§§x1‘h\]];M@ñòò„„„ôTHD½Æ8ŒÃxÏâYLÅT„€ß""#™LÆBˆz ± """""ên§qqˆC! ñ¾ëñd""""ê{tÐÁþCÄd#‰HM>û ˆ‹~úI쨈ˆˆˆˆˆˆˆzWñ*üà‡à(¬DDmq„¢Þƒ !DDDDDÔ§ìÀ$ ·á6œÄI¨¡;$""""êtÐA ¥Øa˜›18sðõÆ>þX숈ˆˆˆˆˆˆˆz/xá=¼‡]Ø…=Ø#v8DDNƒ !D½Bˆˆˆˆˆ¨O `-ÖbáA<ˆOñ)°>D:QW” ÄùB <8zX²xà`Å  ©I쨈ˆˆˆˆˆˆˆzɘŒd$ãiûÏ?&ƒQOÐA½+!Ä() 8v (,â‌ ±#""""""""rj/âE„"Ïà±C!"êîîî8~ü8²²²pþüy¢²²fÉ 0lØ0‘¢$"k˜BDDDDD½Â9œÃݸe(ÃqÇTL;$""""ê'zuBŒœ:  L˜lÛ&vDDDDDDDDDNËxoã_øöc¿Øá9œ§§'|ðA¸»»w:ŸŸŸÆßCQ‘½˜BDDDDDNï3|† ˜€Dà;|‡Ûq»Ø!Q?R‚¸ÀA;”›7`ðï+V=¬^ ´¶Š‘Sš‚)Xˆ…x O¡5b‡CDäpK—.ESS“Õ×ÝÝÝ1wî\¸¹¹õ`TDd&„‘SÛ€ HB~…_á0÷îFxDDDDÔ+é C á†^~£ËÍ xíµ#„üå/ÀÌ™@E…ØQ9¥7ñ&ªQ—ñ²Ø¡9ܘ1c0lØ0H$‹¯777cΜ9=Ùƒ !DDDDDä”Ѐ%X‚UX…Wð 6aÜÑùð¤DDDDDŽP‚(¡;Œîó›ß‡gÎcÆ?ý$vDDDDDDDDDN'!x¯àM¼‰38#v8DD—’’ËMË¥R){8""²BˆˆˆˆˆÈ锡 S1ŸàìÅ^¤"U숈ˆˆ¨ÓA×·Bà ;Ø¿_숈ˆˆˆˆˆˆˆœN R0cð8G+ZŇˆÈ¡’““-&„¸ººbÚ´iðôô!*"²… !DDDDDäT~ÄÑ(D!¾Ç÷˜‰™b‡DDDDDý\ŸL€ÐPàèQ`þ|`Î`íZ@ÄŽŠˆˆˆˆˆˆˆÈi¸À±§q›°Iìpˆˆ* ÷ß?ÜÝÝÍžsçÎ)*"²… !DDDDDä4>ÇçH@b ¾Ãw†ab‡DDDDDÔwBÀÃøààï^yX¸¸~]쨈ˆˆˆˆˆˆˆœÆp Ç3x©HEŠÄ‡ˆÈ¡{ì1455ux~ÆŒ"DCDö`B9… Ø€™˜‰X€¯ð‚$vHDDDDDn$„„ Dì0+%HK¾ú ˆ®^;"""""""""§±k1ð;üNìPˆˆêÞ{ïEDD„鱋‹ (bTDÔ&„‘¨ш‡ñ0Va^Á+ØŒÍB*vXDDDDDJQÚwGiëž{€ï¾š›qãnL<á‰wðþ‰âS|*v8DD#‘Hðè£ÂÍÍ À„yóæ‰u† !DDDDD$šk¸†_â—øcö ©b‡DDDDDd殡 M}„£Aƒ€o¿ÆŒ&O¶o;"""""""""§0Ó1óð$žD-jŇˆÈa~øa´¶¶š››‘””$rDDÔ&„‘(²…Ñ<äá{|$ð9tÐ?F1òñöíRSV¯þs󈈈ˆˆˆˆ¨?{oÃ^Å«b‡BDä0aaa˜2e @­V#::Z䈈¨3nb@DDDDDýÏgø ±£0 ÿ¿ˆ@±C"""""²¨_&„€D¬] „‡O<\¾ lÙxzŠ‘hTPáe¼Œçða†a˜Ø!uª¶¶¨ªªBKK‹Ùkƒ¡Ãü---¨ªªÂ¸qãðÅ_@£Ñ --ÍôºL&ƒ\.ï°œ——¤R©Ùs~~~pqqB¡è¦Ò‘%L!""""¢õ6ÞÆ³xá!¼‹w!…ÔöBDDDDD"ÑA)¤@€Ø¡ˆcéR`èP`î\`òd`ï^@ÙÏ’cˆˆˆˆˆˆˆˆÚxOb;¶c–á(ŽB‰Ø!QQ]]ÊÊJÓ_EE…ÙcƒÁ€ÊÊJTUU¡©© •••hmm…Á`0%r466¢¶¶õõõ¨««»å˜¶nÝŠ­[·Þòz\]]áëë ©T ///Sb‰1‘Ä××®®®P(ðññŸŸüüüàïïoš6þ) S² 1!„ˆˆˆˆˆzH Z°+ñ6ÞÆñG¬ÅZ±C"""""²IBÒ¿oì'$ß~ $%qqÀ¾}€F#vTDDDDDDDD¢p 6b#â‡ñ!Á#b‡DDN¨±±z½¥¥¥ÐétÐëõ(++ƒN§Cii©éµòòrS¢Gkkk‡õ¸¹¹™%BøûûÃ××nnnˆˆˆ€««+üýýM îîîðöö6%\xzzÂÃÃÞÞÞpww7[·ÜÜ:6%÷÷÷‡DbùšxMM 
ššš:<_QQA:<×ÒÒ‚ÊÊJ477£ºººCŠq“êêj477ãêÕ«fI0•••¸~ýºÅX|}}MïMpp0”J%¥R‰àà`!88!!!ðöö¶º¿ˆz3&„‘ÃÕ¢ ±_âKü/þ ±P숈ˆˆˆìR‚(Á10xð¤ùó‰ü˜=[쨈ˆˆˆˆˆˆˆD1#±ËñžÃLÌD0‚ʼnˆzN§CAA ——‡üü|Óc½^’’TTT˜-#“É„S¢‚Z­Æ€L#^´ý3ŽŒáåå%R)-S(=¾ÍÆÆF‹#¥GP©¨¨@ii)JJJ™™ ½^ââbTWW›­G.—›’E”J%"þ?{wVU¹°ü»A14y ‡œgœõ82rVN‰VVh™yÌl°<™9å<–#N嫉V jET$GPß¾î_&*¸@îÏuíKöÚÏ^ë^kï«6‹uï§BÜÜÜpss£B… ¸»»S®\9¬¬¬žú>Š< BDDDDD$OãèÄiNN8ilt$‘»?Cˆ%KÂæÍ0ltë}cÆJDDDDDDDÄïñ«XE!Ìg¾ÑqD$—¤§§sæÌ¢££9yò¤¹ìqæÌâââøã?HMM5wqq1— êÖ­›iFŠ2eÊP¦LÊ–-‹½½½{U°YYY™å£HIIÉ2+Kbb"ñññœ?žC‡±aÃÎ;gžõÄd2áêꊻ»;nnn¸»»S¡B<<<ðôôÄÓÓ[[Û¼ØM‘ǦBˆˆˆˆˆˆä™(¢è@ŠSœƒÄ£#‰ˆˆˆˆ<’¨HE£cäE‹ÂܹPµ*¼öÄÄÀÌ™P¤ˆÑÉDDDDDDDDž*{ì #Œît§?ý ÀèH"’CiiiÄÆÆrâÄ ¢££‰ŽŽ6ÿ|êÔ)nß¾ @©R¥pwwÇÝÝZµjѺuksAà~ ÄÚÚÚད±±±1¿NçîÝ»ÄÇÇgšé%66–¸¸88ÀÊ•+9þûŒzõê±qãFY¾|9C† QDä1”.]šnݺñÉ'ŸáC‡xå•WØ»w/­ZµÂÙÙ™~ýú±råJnܸat\ÉCš!DDDDDDrì7èK_¶°…E,¢/}Ž$""""’g4CH.xáØ·Ú´† aóf¨ZÕèT""""""""yÆ +>æcZÓšMl¢-mŽ$òT;wŽ… 2wî\Nžúè#ŒŽ)¹D…y¨(¢hHC®r•}ì£ MŒŽ$""""’çâ‰Ç£c<\\`÷nðñ¦MaÓ&£‰ˆˆˆˆˆˆˆä©ù˜sœc³ŒŽ"’gNžXXX°|ùrâââ˜>}:µjÕzê™L&S–[É’%iРË—/êyòƒûÇ!¿2*_~?.¹ÅÉɉQ£FqèÐ!8@³f͘Щüç?F'É3¨À8Æ1‰IœáŒÑqDrUDD=zô Zµj¬]»–Ñ£Gsúôi¾ÿþ{ºu놕••aÙ222ÌÿfddpçÎŽ9°aÃ9r$³gÏ6,›Qî“üʨ|ùý¸ä…zõêñÙgŸqîÜ9æÍ›Ç¥K—hÕªuëÖeñâŤ¥¥Qƒ !"""""ò@_ò%è@7º±‰M8âht$‘§â·¸ÊUÍ’Û,-áË/áµ× (¦M3:‘ˆˆˆˆˆˆˆHž !7ÜËX££ˆäŠ#GŽÐ³gOêׯOtt4óæÍãĉL˜0777£ãeËÒÒ’òåË3`À/^ÌÌ™3Ž$b¸bŊѯ_?víÚÅ¡C‡ðööfРAxzzòå—_’žžntDy*„ˆˆˆˆˆHé¤3žñ aoñó˜‡Æ}ƒ‡ˆˆˆˆÈÓO<€fÉ &|ø!ÌšãÇCHÂob‘gŸ5ÖÌf6+XÁf6Gä±>}šž={R»vmbccÙºu+¿þú+ ÀÒÒÒèx9æëë˹sç2-3™L˜L¦,c³[~ÙÙ³géܹ3ööö¸¸¸Ð¯_?’’’ºýûÏ?vìmÚ´ÁÁÁâŋӾ}{~ûí·,ã=J»ví(^¼8´nÝšcÇŽ=0óÃÄÆÆH‰%þv»ëׯçÅ_ÄÆÆ‚ƒƒ¹víZ¦1ÉÉÉS¹relll([¶,ƒ """"Ó¸#GŽÐ¾}{ìíí±··§M›69r$Ïó>|˜V­Zagg‡ƒƒmÛ¶eË–-<>žžžæcÛ¸qcóòÇyäô5Îêԩ 8~ü8Í›7gذaÔ©S‡Í›õÿ­‚B…Éä7èJWÂã[¾%”P£#‰ˆˆˆˆm{òäIÚ¶mK÷î݉‰‰áÔ©SüóŸÿ¤W¯^›ûïôîÝ›nݺqòäÉn sçÎ 8K—.ñÓO?qéÒ%FŽ™iÌÀqttdß¾}$''³qãF¢££©_¿¾yLtt4-[¶¤cÇŽÄÄÄpúôiúõëGçΉ‹‹ËÓ|ǧW¯^„„„˜˜ÈO?ýÄ7hÓ¦ÍϲeËððð 22’={ö˜—?Î{$'¯q~S©R%¾þúkŽ=JµjÕhÛ¶-ù:³ÜSpjy"""""’çH #9Å)¶³F42:’ˆˆˆˆˆ!â‰Ç„ \ŒŽòlëÛœ!0®\eË X1£S‰ˆˆˆˆˆˆˆäªÙ̦5#ŒBŒŽ#’#QQQôïߟ'NÂo¼µµµÑ±Yzz: lß¾¦NúÄë6l˜¹XP¼xq¦L™‚¿¿ŽŸ?aÂs±îL®^½Jhh(óçÏ 44”×^{—^zÉ<®wïÞôéÓç±rѯ_¿¿Ý.d.:ØÚÚ2kÖ,<==3­kûöíÌ›7R¥JP·n].\HåÊ•ÍcBCC 
fÈ!æeýúõãöíÛL›6°°°<ËÊ[o½E‹-¨Q£sçÎå¹çžËöØ;vŒîÝ»³lÙ2¼½½³ó(ròçWÕªUcÕªUìܹ“‘#GR³fM>øàFõX3ÓHÞÓ !"""""@4Ñ4¡ I$±‡=*ƒˆˆˆˆH¡O<%)‰5ïœN‹°};ìÝ mÛBr²Ñ‰DDDDDDDDrU*B™ÈÎG䡦OŸN½zõ(Y²$ÇŽ#44´À•AL&&“ KKKÊ—/ϸqãX¸p! xâuûùùeºïááÁùóçsüüû%…? dëÖ­æûÛ¶m£K—.YÆ=Éì,mÛ¶}èv³S²dI.]º”iYÆ éÔ©›7o&íÿf®T©R¦²Fxxx¦RÄ}íÛ·gÛ¶myšoÇŽdZV­Zµlgõ8uê;vdÞ¼yøúúþí¶r*'¯q~×¼ys~þùgFŽÉk¯½F`` É:Ÿ/©""""""ìc iH)J±}T£šÑ‘DDDDD •@®¸£ððõ…Ý»!:üýᣉˆˆˆˆˆˆˆäªqŒÃ 7Í"ùZjj*dܸq¼÷Þ{lß¾Š+ë±ddd˜o—/_æƒ> ((ˆÓ§O?ñºK–,™é¾µµu¶Eƒ)]ºt–eeË–%11Ñ|ÿâÅ‹¸ºf=GÝssÊÅ%ëŒØÝnbb"ƒÆÍÍ KKKs±æ¯V¬X¯¯/ǧT©RøùùÆ;wÌc’’’ððð0¯ãþÍÕÕ5Û×!7ó]¼x''§‡€6mÚpýúunܸ‘£ñ9‘“׸ °¶¶æÃ?dçÎ8p€† mt,ù BDDDDD ¹•¬ÄšÐ„ìÄg£#‰ˆˆˆˆ.\ÈúÇ'ÉC5kž=pí4l'OHDDDDDDD$×XcÍÇ|Ìr–³…-FÇÉ"55•Î;³~ýz¾ûî;BBB°°x6.3vttdРA 4ˆwß}7Ëã&“‰ÔÔÔLËþ:ãDnJHHȲìüùó”)SÆ|ßÉɉøøø,ã²[–SÙíÓ_·Û¿ìììØ³g)))æRÍ_•(Q‚3fpòäIbbb>|8K—.¥OŸ>™öáÒ¥K™Ê9÷o7oÞÌÓ|¥K—æâÅ‹?(@hh(ßÿ=ƒfÿþýÙŽyÔ÷HN^イI“&DDD`ggGãÆùí·ßŒŽ$òlü—ZDDDDDKaô¢A±’•£˜Ñ‘DDDDDò…xâ5Cˆ<<àÇÁÁš4ÈH£‰ˆˆˆˆˆˆˆäšÖ´¦#Å(RI}øDž’ôôtzôèÁþýûÙ¾};mÚ´1:Rž6lëÖ­1$Š~ IDATËr!¿««+±±±™–ýðÃy–cË–¬¥°5kÖЪU+óýV­Z±víÚ=7§ÂÃúݽ{÷2qâD<<<°´´ȶ¼a2™ˆ‹‹î?zõêÅ÷ßÏÖ­[ÍcZ·nÍ®]»²<÷ÇÄÇÇ'Oó5kÖ,Ëú>L5²ŒíÓ§õêÕcÑ¢EtíÚ•'Ndó¨ï‘œ¼ÆMùòåÙ¹s'žžžpêÔ)£#ÉÿQ!DDDDD¤J'á ç5^c³# ýz """"b¦Bˆ\\`çN¨Rüý!"ÂèD""""""""¹æ>!Ž8>æc££ˆ˜Mš4‰mÛ¶±iÓ¦l/ÔV899ѱcG¾þúëLË[¶lÉ›o¾I\\ׯ_gË–-Ì™3'Ïr|ñÅ,Y²„¤¤$ùꫯ #44Ô<&44”éÓ§3oÞ<IJJâÛo¿eîܹ½Ý>ø€åË—ÿív5jDHH§OŸæöíÛDGG3tèÐl×7xð`Ž=Jjj* |ôÑG´hÑ"Ó>¼û\¹’¤¤$®]»Æwß}GŸ>}²©%7óM˜0I“&Î7ˆŒŒä¥—^bäÈ‘<>-Z´`ÆŒ´k×.Ë úÉÉk\/^œ7âââB=²Ìš"ÆÐ_"""""…Ì nÐ….ÌcKXÂHüË®ˆˆˆˆHaO<.¸£ðrt„-[ ~}hÑvï6:‘ˆˆˆˆˆˆˆH®¨@B!”PÎpÆè8"ìß¿Ÿ÷ߟ°°04h`tœ'f2™2ýûWÆ ãõ×_Çd2qñâE¦OŸŽ••>>>¸ººòé§Ÿš‹^σÖý°mþÕâÅ‹ùöÛo©T©•*UbÕªUlÞ¼ó˜Ê•+³iÓ&V®\i·dɾùæ,,íòïû¹–,YÂW_}…‡‡Ç·»`ÁRRR¨_¿>öööÒ¾}û,ûN±bÅhÚ´)4lØ””¾ùæóV­ZÅÒ¥K©R¥ eË–eÒ¤IÌž=›Î;çi>ooo.\Hhh(NNNtèÐ=zd*8::fzÞ¹sçèÝ»7ÑÑѸºº²hÑ"óØœ¾GîËÉk\P988°bÅ ¢££™8q¢ÑqBDDDDD •xâiJSö±pÂéIOò4hЀï¿ÿ>ÛÇ>Œ››iiiìÞ½___lllððð૯¾Ê4þÚµk„„„PµjUlmm)Q¢-[¶ä»ï¾Ëóýx“ÉDzz:ï¿ÿ>X[[ãååÅ'Ÿ|’eì† hРvvvØÙÙýí1‘¼•@B¦B Ëç×|ÅÖÖ¬6m ukX¿ÞèD""""""""¹b<ãqÃqŒËÕõê–<ŽñãÇÓ¸qc‚‚‚ŒŽ’+222Ì·ì4lØÐü¸““poæÅ‹sá®_¿Îúõëqwwϲž­ûaÛü+6lØÀÕ«W¹~ý:7n¤zõêYÆÕ¬Y“7rýúu®^½Ê† 
HIIÁÝÝ=§‡#S¾5j°eË®]»öÀí:;;³`ÁHMM%**Šž={fÙ¿€€Ö¬YÃÅ‹IMM%&&†3f`ooŸi}U«VeåÊ•\¹r…ëׯA```žçƒ{¯õž={¸uë±±±Œ?>ÓãW®\Éô¼råÊez-ûõëg›Ó÷È}9} *OOOBCC™5kqqqFÇ)ôT)$ŽqŒ4à WØÇ>^äECóŒ=šY³feûØìÙ³2d–––ü÷¿ÿ¥{÷îsá–/_Îäɓٵk—yüÀIKK#<<œäädN:ÅèÑ£™={öSÚ›¿7lØ0nݺExx8—.]bîܹ̜9“eË–™ÇüôÓO 4ˆQ£FqúôiN:ň#èß¿?00½ˆˆˆHás…+ÜâV¦BHaúüš¯XYÁ·ßB߾У¬\it"‘'f5ó1ËXÆväÚzuKÕÑ£GÙµk—¾å?Ÿ2™L„……qùòeRSS9t课ú*Ç7:šC‡ÅÑÑ‘yóæ¥ÐS!DDDDD¤ØÉN^äEÊR–}ì£*UŽD÷îÝùí·ß8zôh¦åIII¬ZµÊüí#“'OfüøñôéÓ|}}™9s&Ó¦M3?gÛ¶m„††R±bEŠ-J©R¥èС[¶lyªûô ...Lž<OOOìììhÚ´)³gÏfæÌ™æ1S§Nå½÷Þ£oß¾”)SgggúõëǤI“øðà L/"""Rø$©R˜>¿æ;EŠÀW_ÁСл7ÌŸot"‘'ÖšÖt¤##ÉîäÊ:uKÕÚµk)_¾<~~~FG)L&S¦æ»ï¾cÆ T¨PgggÌ+¯¼Âرcó2¦Ë•õé–<ªcÇŽñ /`a¡Ë‰Ÿ†ŒŒ ó-'Ú·oOxx8×®]#99™ƒ2pàÀLcL&ÓßÞäézÔ׸ «_¿>¿ÿþ{¡ÙßüJÿy†…FozDËYN1Š)“   V¯^ÍÅ‹HOOç³Ï>cĈæ1gÏžÅËË+Ó ‹R¥Jqþüyó˜¥K—’˜˜ˆ§§'Õ«WçŸÿü'ëÖ­Ë7¿pzxxdYV²dI._¾l¾Ÿ`>ûg...ÄÇÇçe<ù‹xâ)B‘,eêÂòù5_›4 ¦LÑ£aút£Óˆˆˆˆˆˆˆˆ<‘ÊTf c˜ÈD.r1WÖ©sXò(’’’(]º´Ñ1ä ü¹€ÝM$/•)S†””®]»ft”BM…‘gP:é e(¯ñó1a„a‘?þ;99ѵkW¾øâ Ö­[‡««+¾¾¾æ1œ;w.ËI‹ôôtó˜J•*±fÍ®\¹ÂÒ¥Kiذ!“'OfðàÁO}Ÿ²““oSqqqáÂ… Y–_¸pWW×¼ˆ%""""O¿æ{ãÆÝ›!dìX?Þè4""""""""Oä-Þ¢Åx‡wre}:‡%ÂÉɉÄÄD£cÈ#HKK#((Èè¹*((ˆ´´4£cÈc¸pá666ØÛÛ¥PËW„‰ˆˆˆˆÈ¹Îu:Ó™…,d kÎp£#ý­Ñ£GóùçŸsçÎfÏžÍÈ‘#3=Þ¼ysÖ­[—£uY[[S§N‚‚‚Ø´iË–-Ë‹ÈyÂÇLJ 6dY¾~ýz||| H$"""Rx%€+Ù—rõù5Ÿ>æÌiÓ $ôMw"""""""R@§8ð_ò%‡9œ+ëÔ9,É©êÕ«sðàAîÞ½ktÉ¡üüüŒŽ‘«š4iÂx}ùOAõêÕ1™LFG)ÔTy†œç¿æsíÚÁš5°x1¼òŠJ!"""""""Rà¸âÊ8Æ1…)ÄûXëÐ9,yï¼ó­[·¦mÛ¶8pÀè8òIII”(Q"GcK–,É¥K—2-kذ!:ubóæÍ¤¥¥P©R¥L3uÀ½™D¾øâ óýO>ù„ýë_Y¶áçç—龇‡çÏŸÏ´lÇŽdZV­Zµ,Û,Q¢III9Ú71ÎÕ«WiÓ¦ ‰‰‰¬X±kkk£# *„ˆˆˆˆˆHi¤ñ*¯òoñ ŸFðã}FF7nÜ`õêÕ888GDDDD„‹\$´l !úüZ´m{¯²d ôëÿ÷GM‘‚âu^§<åy‹·ëù:‡%Â+Vàç燿¿?ëÖ­3:’dãÚµkXZZfYž˜˜ÈàÁƒqssÃÒÒ“É„ÉdÊ2nÅŠøúú2|øpJ•*…ŸŸaaaܹs'Ó¸¾}û²|ùrnÞ¼ILL §N¢]»vYÖW²dÉL÷­­­³=.^¼ˆ““ÓC÷ÍÒÒ’ää䇎ãüñÇömÛððð0:’üŸ‚wŘˆˆˆˆH!wk´£KXÂzÖ3ŒaFGyfÄm!D ˆ6m`Ý:X»ú÷W)DDDDDDDD +¬øYÌbö°Çè8RˆXYY±råJzöìI·nݘ2e w5 o¾booožÙãÏú÷ï{öì!%%…ŒŒŒ,Å ¸7 ÇŒ38yò$111 >œ¥K—Ò§OŸLãÊ”)Ë/¾ÈÒ¥KùüóÏ5jT¶“œ(]º4/^|踴´´Ï~"OßîÝ»©W¯)))ìÛ·5jIþD…‘äçðÃ(¢ØÍnÚ‘õDDDDDäñ©òŒhÕ 6m‚ àÿP)DDDDDDDD ”®t¥%-Íhî¢ òåé±²²â«¯¾â£>"44”æÍ›súôi£cÉÿ)]ºt¶³hìÝ»—‰'âááažAäæÍ›YÆ™L&âââprr¢W¯^|ÿý÷lݺ5ËØòé§Ÿ²nÝ: ðØ™›5kFxxx¦e‡ÎR(HNN¦téҽɩ©©Œ;5jÄÞ½{©T©’ѱä/T) 
ŽqŒF4â:×ù‘ñÁÇèH"""""Ïœxâ±ÆGŽ"OªiSظñÞ­o_•BDDDDDDD¤@™ÉL"‰ä¾1:ŠBcÆŒáçŸæêÕ«Ô¬Y“ÐÐPRSSŽUèyyy™ Ö¨Q#BBB8}ú4·oß&::š¡C‡f»ŽÁƒsôèQRSSIHHà£>¢E‹YÆuèÐÓ§OÓ£Glmm;ó„ ˜4iáááܸqƒÈÈH^zé%FŽ™i\\\^^^½É};vìÀÇLJ/¾ø‚Ï?ÿœU«Vaooot,Ɇ !"""""À>öÑ”¦”¥,{Ù‹'žFGy&%€ .˜0ErƒŸ¬_¯Ò¯¤§HDDDDDDD$GjPƒWy•7xƒd²Î ’×jÕªÅO?ýÄo¼Á´iÓ¨]»6«W¯&##Ãèh…V@@?ÿüs–å , %%…úõëcooO`` íÛ·îÍ r_xx8ÅŠ£iÓ¦888аaCRRRøæ›¬Å3 1bD–Çî¯óÏë~Ðrooo.\Hhh(NNNtèÐ=zd)¬üòË/´lÙ2§‡BòбcÇ $ €êÕ«säÈ‚‚‚ŒŽ%C…‘|n5« €Æ4f;(C£#‰ˆˆˆˆ<³HÀW£cHnjÞ¾û6l€þîÞ5:‘ˆˆˆˆˆˆˆH޼Ç{¤‘ƦE )kkk&L˜À±cǨ[·.Ý»wç…^`ãÆ*† 00… fYîììÌ‚ HHH 55•¨¨(zöìIFFF¦×) €5kÖpñâERSS‰‰‰aÆŒÙÎú°zõjš4iBÙ²e³7ÆÇLJß~ûÅ‹sâÄ ‚‚‚(R¤ˆÑåèŠ2‘|&•TúÒ—™Ìd‹%ÔèH"""""…† !ϸ֭aéRX°ÆŒ1:ˆˆˆˆˆˆˆÈC¡³˜Å:Ö±…-FÇÀÇLJo¿ý–'NЫW/>ÿüs*W®LëÖ­Y¾|9©©©FG|fYZZ2wîÜ<[FF·oßfæÌ™y¶¿úòË/±´´|jÛ+ÌöïßOPPåÊ•ãÕW_¥\¹rlß¾гgOA (BDDDDDò‘Ë\¦­ØÌf¶²•¾ô5:’ˆˆˆˆH¡O<.¸CòR—.÷f ùì3ø×¿ŒN#"""""""òPþøÓ….ÌîGÄÌÃÃiÓ¦qöìYV®\‰••}ûö¥|ùòŒ3†C‡Q¤ÐKHH`æÌ™ÔªU‹ °oß>&NœÈüÁòåËñ÷÷7:¢Eíh‡#ŽüÄO¸ãnt$‘BçÈ C…¢gO¸y^~¬¬`£‰ˆˆˆˆˆˆˆà^æe^åUjSÛèH"9V½zuªW¯Îøñãùã?X·n›6mâÍ7ßäêÕ«”-[–€€sA¤bÅŠFGÉW®^½Ê?üÀŽ;رc‘‘‘)R„zõêÑ¿ºté‚&“Éè¨ò”©"""""b0Â&˜‘Œd&3±ÀÂèH"""""…Z<ñØcvFG‘§môh¸uëÞ¿%JÀ€F'ÉÖðŸL0ÛÙnt‘ÇR¾|y† ưaÃHKKãàÁƒìܹ“;v0|øpnݺE•*UhÞ¼9~~~øúúâåå¥ Ý¥PILL$""‚½{÷²cǃädèÚ¶mƒFŒN%"""""""’Å¿ù7Ïñs˜Ã(FG$ÏXYY™/Œ1b—/_6D"""˜:u*.\ÀÅÅooo¼½½©U«ÞÞÞÔ¬Y[[[#wC ¡ôôtNž|Ø\:¸téR¦çXYYe[qrr¢D‰8::R¢D‰,?;88<ÝÎ×RRR¸rå ÉÉÉæÛ•+W¸|ù²ùþŸK \¸pëׯgZ­­­ùø»¸¸àããCçÎͳ½T¨P²eËR´hQƒöTäñ¨"""""’‹ÒIg(C™Ç<æ0‡ ‚ŒŽ$"""""9 Bˆü-7·ÿ?SH÷î°aè‚"""""""’O´£­iÍÆpƒX`at$‘gš³³3ÎÎÎøøø’““IMMÍ6ËŸ‰‹‹ ÎÎÎT®\ggg\]]qrr2ÏÐâììŒÝ_/‘‚L…‘\rƒô¢»ØÅ:ÖÑžöFG‘Š'&œq6:ŠäWÕªÁÆÐ¬  óçƒÉdt*f0ƒ:Ôa KèG?£ãˆzE‹¥\¹r”+WŽ:uêäè97oÞÌR†¸ÿóý[rr2ׯ_çÖ­[$$$pûöms©âÊ•+¤§§“œœl.bäÖÖÖØÚÚR¬X±Le–%J˜Ë,E‹¥råÊYfIyÐ *"r !"""""¹ ‰$:щÿò_¶²•F42:’ˆˆˆˆˆ<‚(E)¬°2:Šägÿó?°n´m åËÃHDDDDDDD€ÔàŸü“ñŒ§+]±ÅÖèH"òˆlmm±µµ¥lÙ²¹¶ÎÔÔTnÞ¼iž™ãÏnݺEJJJ–ç\¾|ùë³³³cóæÍÌŸ?ŸÕ«W`aa‘¥ qf“û3•ˆHÞQ!DDDDDä âmhÃî°—½xáet$yD $àŠ«Ñ1¤ ð÷‡¯¿†~ýÀÕF2:‘ˆˆˆˆˆˆˆïó>ËXÆLfòoGDòkkk¬­­)Y²d®­311‘÷Þ{///•=Dò £ˆˆˆˆˆd9HCâ€ûا2ˆˆˆˆˆHO¼ !’s}ûÂûïCp0¬Zetœqf,cù9Ïy£ãˆÈ3ªZµjdddpüøq££ˆ*„ˆˆˆˆˆ<¶p o¼ÙÎv\p1:’ˆˆˆˆˆ<&B䑽ù&Œqo¦={ŒN#"""""""Àk¼FiJó.ïEDžQUªT¡hÑ¢ü÷¿ÿ5:Šˆ 
BˆˆˆˆˆÈcYÀÚÑŽÎtf#qÀÁèH"""""òT‘Ç2s&´o:@d¤ÑiDDDDDDDD(F1Þç}æ1(¢ŒŽ#"Ï KKKªT©¢BˆH>¡BˆˆˆˆˆÈ# #ŒA b(Cù†o(JQ£#‰ˆˆˆˆÈŠ'^³þÉ£³°€… ÁÛÚµƒ³gN$"""""""Â?ø>ø0†1FG‘gÔsÏ=Çïÿ½;‹ªÜÿþö}uaS™ÅÜÐRPÓDMa¸[™eš7Û35«[i«Ke–Z7ÍPÛõº–7÷BMRÑaQ6A‘õùýá¹ 0£0g`>ï^óRΜ9çóŒ‡éžÃsXƒ5X…UA&u,"""""ºCyÈã !t{lm;7·[E!EER'""""""""'ƒ «° ¿á7ìÆn©ãQ;È‚"#Á‚"""""T S0ë±[°ó0OêHDDDDDÔBŠP„ T° „nŸ³3°kP\ LœTTHˆˆˆˆˆˆˆˆLÜ` ÆDLÄsxÕ¨–:µ3HMMEmm­ÔQˆL BˆˆˆˆˆšQˆBD ñˆGâ0 “¤ŽDDDDDDw ¹¸ŒË¨D¥úgèŒÎRÆ¢¶ÎÇر8q˜=BêDDDDDDDDdâÞÃ{PB‰õX/u"jg‚‚‚P^^ŽÌÌL©£™< ©³ld#‘(@~Ãoèƒ>RG"""""¢;4ƒ‘Žt€œà g8Á oâMtAxÂÑC1rÈ¥ KmKŸ>ÀöíÀ¸q@` °x±Ô‰ˆˆˆˆˆÈ„\¼x¯¾úªÆºÿüóOÀ¤Iÿ»á™™™Þzë-øùù<#– <…§°K1“á g©#Q;8wî|}}¥ CdâXBDDDD&í ®À N°MƒçÎà " '8á(Ž¢ ºHˆˆˆˆˆZZô@2 pý¿ÿÀøÿ½l^‰J<ŠG±¤ŒJmQD°v-0g PS¦HˆˆˆˆˆˆLDii)¶nݪõ¹ôôtŸ_yå$""c°Kð5¾Ær,Ç;xGê8DÔN¸¹¹¡cÇŽHIIÁ˜1c¤ŽCdÒ̤@DDDD$•2”¡7zc4F£•ÏíÇ~ ÁøÁqÅ DDDDDíÈ}¸O]øQW-jQùßÿà1} ˆˆŒ+\±Kð!>ÄE\”:µ#AAAHII‘:‘ÉcA™¬ð®â*ã0¦c:jqkúìñ#"‰]ظHœ”ˆˆˆˆˆZÒ½¸U¨jôys˜£?ú#¡LEíÎGÝš-dÂà"[‘aLŸ>–––>oii‰GyÄ€‰ˆÈ<…§à _,Á©£Q;„sçÎIƒÈä± „ˆˆˆˆLÒ\Á;xÕÿýï;|‡ù˜ÕX‡ðÇãøßÁ¶RG%""""¢Öýá‡FŸ¯E-¿§;gflÞ x{ß* )*’:™€)S¦ ºººÑ竪ªððÃ0KXbV ±8Š£RÇ!¢v" ©©©RÇ 2y,!""""“ô^Ó¸#p-jñ>ÃKx ˱«±f<]&""""j—ÌaŽ0„i=ç—A_øb&HŒÚGGàßÿŠ‹˜ ‰AYDDDDDD-ÁÏÏ}ûö…™™–>¯L†þýû# @‚dD$µð†a^À RG!¢v" YYY(++“: ‘Iã7""""29)HÁøB£ nâ&:¢£DɈˆˆˆˆÈPîÃ}0‡yƒåf0Ã+x…âÔr¼½ŸžzJê4DDDDDdf̘¡µ ÄÜÜ3fÌ ‹÷ðá~ÆÏRG!¢vÀßßB(•J©£™4~£EDDDD&ç¼Ðäà®Ù˜=ØcÀDDDDDDdhÃ1¼A‘88Á Ó0M‚DÔ®…„ÿú°aðñÇR§!""""¢v.&&µµµ –×ÔÔ ::Z‚DDd,b ¢—ðªÁ™L‰èÎøùùÁÌÌ .\: ‘IcA™”ƒ8ˆØ¡uà—Š€Àƒx‰H4`2"""""2¤þè;Øi,³„%žÃs°…­D©¨]‹ŠÞ|xæ`Ç©ÓQ;æáá¡C‡ÂÜü3cš››ãÞ{ï…———„ɈÈ,Çr¤! ±Qê(DÔÆÙÚÚÂÛÛ©©©RG!2i,!""""“! 
° a‹f×-G9Va•R‘,`!¢1{ 9Ìñ$ž”0µ{‹Ó¦Ýzœ9#u"""""jǦOŸ®Ó2"2=rÈñžÀ2,CʤŽCDmœ¿¿?g!’ BˆˆˆˆÈd|‡ïˆÄF§¾µ„% ÁX÷ßÿˆˆˆˆˆ¨ýº÷Á·î–j KÌÁ¸Ã]âTÔî}þ9Ы0q"PT$u"""""j§¢££afö¿¡afffxðÁ%LDDÆd)–¢ eøJ…ˆÚ¸€€ÎB$1„‘I¨B^Ä‹wÿU±€,a‰ ˜€ƒ8ˆ$$aæÀv$%"""""CŽá¨B 5øþ!q"2 VVÀ?ååÀ¤I@MÔ‰ˆˆˆˆˆ¨rrr˜1c`aa DFFÂÅÅEêXDd$: ^À xï"yRÇ!¢6Œ3„I!DDDDdÖb-2‘‰ZÔ¸UÐ ¯âUd#ßá;„!LʘDDDDDd@!-lá!( 8™ŒÎŸ–,‘: µSÓ¦MCMM jjj0uêT©ã‘‘yÏÁ.ø'þ)u"jÕ•…²²2©£™, ©QûURR‚êêêFŸ¯¬¬Ô躺º6¹=;;;X[[ë£ÅX†e¨E-,a‰jTã>܇X€HDj5„ˆˆˆˆˆÚ®²²2TVV6úü7PQQà=¬`îõ¹(¬)l°®µµ5ìì8{ µ‚þýu뀙3>}€‡–:µ3&L€ `üøñ§!"cc [,ÅRÌÃ<,À @êHDÔùûûC¥R‰»îºKê8D&‰!DDDDíTii)nÞ¼‰ëׯ£¬¬ (**Byy9nÞ¼‰¢¢"ܼy7nÜ@qq1***PZZª.⨭­Eqq1àæÍ›(//£¶¶ÕÕÕ()) 9˜J ŽŽŽ°°°€™™œ666°µ½u§ßÂy…(|¤7, Ø§€"NN%°Ãf¸P¯ëââkkkØÛÛÃÉÉ 666ppp€ƒƒlllàää{{{XYYIÖV""""¢¶@ÕŸ(++Cyy9®_¿®ÑG)--Eyy9JJJPRR‚òòrþHÝþ†¶e¥¥¥¨ªªÒXvÛ¦ Œxt„^/srr‚¹¹9,--áààà} 8::j]æèèõßmmmáàà'''ØÚÚªû#ª¿;;;ÃÌŒEìíÖôéÀ±cÀ#·ŠDˆˆˆˆˆˆôPTT„ôôtddd ==]ýÈÈÈ@FF†ú;>tëÖ Ýºuƒ¯¯¯ú¡úÙÅÅEâ–‘Å£X…Ux¯b¶I‡ˆÚ ???˜™™áÂ… ,!’ BˆˆˆˆŒTUU põêU ¸¸EEEþY÷Q\\ÜäÌ4ŠêHRÍÂaff¹\™L+++ØÛÛ`iiÙhñE]ºÜIW5+HýÙB´)**‚¢ÁrmE*u—‚®_uE—#]p³ð& ¯$•••(..VÉ¢¢¢7nÜh2‡œáââÒàOÕ£þrwwwtìØnnnMn›ˆˆˆˆÈ˜ ??(**Baa¡FߣîÏuÿ®*.oLÝ¢kmØVVVP( ÑGQõATËd2™Ö+u‹4´©û¼ª¸ñÚ×U?_ªR·¡ê›h[vãÆ äççãúõë(//GYY™Æß›R·¿áêêÚìß;t耎;¢C‡Mn—ŒÄGgÎQQÀŸüw#""""¢:®^½ª.î¨_𑞞®Ñ÷ððPyDDD¨ @hlãèÑ£ˆEnn®úµÎÎÎ"u Fºuëwwwƒ·ˆZŸ9Ìñ6ÞÆx‡q¡•:µ1¶¶¶ðööFjjªÔQˆL Bˆˆˆˆ ¤²²¹¹¹ÈÊÊÒ(ôÈËËÓø¹  W®\Ñ:€ª±BbÕr[[[8::ÂÞÞÖÖÖpqq­­­zjh“3K÷U¯_¿Ž›7o¢´´¥¥¥¨¨¨Pßḱ¢œ .4[˜caawwwtèÐAýèÔ©“úïªç<==ѹsgtîܹeß""""2iååå¸|ù2rss‘ŸŸÜÜ\u?DÛÏ555¯·±±ÑZ„àåå¥þ»êOggçF ?¨¡Æ EŠ‹‹µÞ\¹r)))ËoÞ¼©±MsssuaHÇŽѹsg­?{zzÂÛÛ[k¡?€¥%ðÝwÀÀ@L °g`Á¯oˆˆˆˆˆLEaa!”J%²³³‘““¥R©~\¸pAã;CWWW( ( Œ3žžžðòò‚B¡@÷îÝÕ³Uꪲ²—/_n°ÿäädìܹêk666ê}©u÷¯ºÑµ=0Ã1/ãeüŽß¥ŽCDmP@@.\¸ u "“ÅoˆˆˆˆîPuu5òòò••…ÜÜ\\¾|yyy¸téòòòÔƒ­®\¹¢ñ:{{{¸»»£S§Nê9~~~ TgggXYYIÔJÓãää'''têÔ鎶SZZŠââbõ€:ÕÝ•UE@ùùù8{ö¬FQPÝ»[YY¡sçÎðññ‡‡‡úOoooŸyW&""""*((@vv6.]º„ììldeeáòåËË®]»¦ñÕLvª   F \]]M·¸ÜT};¡šùPÕÿÈËËS÷AT?Ÿ´}¤¦¦âúõëêuë|„‡‡cΜ9ꢋ   ØÛÛ·h6Õ ªY:ëSŒh+VIHHÐ(±¶¶†···Öb…B___˜™™µh~"j9˱ƒ0;±÷ã~©ãQ€””©c™,„5£ªª 
—.]RO¡œ‘‘´´4õß³²²4fpssSß]ÕÓÓ}ûöÕ¸ïããƒ:ðΫ&ÂÁÁðööÖù5EEEÈÍÍU©f–ÉÉÉÁÉ“'‘““ƒ¬¬,»ÿÚÙÙ5˜¾»îŸœi„ˆˆˆ¨í+))R©DZZZƒ‡R©Dyy¹z]'''øøøÀÛÛ^^^ §§§z ¿j`¿g!hWlllàéé OOOÖ¯®®F~~~ƒ¢Ë—/###GŽÁ¥K—PRR¢~­­-är¹úî¯u …Bï»ÑR}ûëÖÓ§={³fIˆˆˆˆˆˆtÐTÁÇùóç5úTÚ >TÅr¹vvv¶¤¡æ FªªªŸŸß XDU0’™™©þµ¹‚‘nݺÁÜÜÜÍ#¢:îÆÝxâ¼€1 -%"=øùùa÷îÝRÇ 2Yü¿6nÝEõüùóHIIÁùóçqþüyuÑGvv¶úÎ6¶¶¶ê÷={öÄØ±cÑ­[7õR½¼¼xÇ\ºc...pqqAPPP“ë]»vM]rùòe¤§§#==ÿý7vî܉¬¬,­Ç®\.G`` ѽ{wtëÖwd""""27nÜÀ¹sç’’‚ääduß$-- êõ¼¼¼Ôƒð|ðA( tíÚU=»CKß1”Ú' uIHHH£ë•––ªg™QÝ$!-- ÇÇwß}‡œœõº:tP›Ý»wGÏž=„ÀÀ@£Ød”¦Myónˆôí+u""""""“W·à£~ÑGJJ JKK–––èСƒºÈ¡nÁ‡B¡@—.]`ii)qkZ–¥¥¥ú;ÒÆú•õß?ÕãàÁƒHKKSßàÂÊÊ >>>Z‹EX0Bdïâ]ô@ü ÿÂcxLê8DÔ†øúú"++ •••°²²’:‘ÉaA™”ÌÌLuá‡êqþüydff¢¶¶æææðõõE÷îÝÑ»woŒ?^c¦β@ÆÄÍÍ nnnÖú|UU•ºPD5ÃMzz:’’’°}ûväçç¸uáîÝ»«ªb‘ÀÀ@¸¸¸²IDDDD&ãêÕ«HNNÆÙ³gqîÜ9õŸBÀÒÒþþþ ÂСC1sæL™XˆN†äàà€=z GZŸ¿yó¦ÖÙk¾ÿþ{¬X±UUUÉdèÖ­‚‚‚гgO¢GèÙ³'ÜÝÝ Ü"#÷þû@RðÐCÀñ〳³Ô‰ˆˆˆˆˆÚ5m ª¢sçΡ¬¬ @ó]»våLœZ¸ºº"$$Dï‚‘ãÇ#==7nÜpëýïÒ¥‹Öb¾ÿD-Cfc6–b)b{ð¦3D¤¹\Žšš\ºt ~~~RÇ!29< &""¢v©²²©©©8~ü8Ž?Žäädœ:uJ=¾îtÌ3gÎDpp0 zôèÁ;–R»aii©0¨MQQ.^¼¨¾°~æÌìÝ»«V­R¹áéé‰àà`ôìÙS}±>((ˆw`""""ÒCvv¶FßäÌ™38{ö,„°¶¶†ŸŸ‚ƒƒ5ú&ÁÁÁ,ú 6ÃÆÆ={öDÏž=|ÇŽCqq1ìíí1pà@Lœ8ƒ  AƒÐ±cG©ãµk6660`  ^&„@JJ Ž9‚#GŽ`÷îÝøàƒP[[ ¹\ŽÁƒcذa5j …„鉈ˆˆî\mm-Nœ8={öàÀ8räJJJàêêŠÐÐPÌ›7C† ÁÀacc#u\¢6ÉÁÁaaa S/+//Gbb"8€Ã‡céÒ¥(**‚££#BCC1tèPŒ3ýû÷o7…X´8tˆ‰Nž8ã)µ3ª‚úƒõUŒŒ ÔÔÔ¸õ=EÝððpõ`}…B¹\ÞþútÇt-©ü%$$àÂ… (..ÖØ–¶b…BÀÀ@Þ „Ú {Øc)–b`>æÃþRG"¢6@.—C©TJƒÈ$± „ˆˆˆŒŽR©óKh IDATD||<âââð믿¢¨¨]»vŰaÃðÖ[oaðàÁèÝ»7,,x*C$5™L†   á‘G\¿~GÅ‘#GpôèQ<÷Üs(++ƒ¿¿?"""#FÀÉÉIâôDDDDÍ»víâââ°{÷nìÙ³W®\··7FŒwß}C† AÏž=y‡Q¢Vdkk‹¡C‡bèСþ7{¨jvžµk×bñâÅèܹ3ÆŒƒÈÈHŒ5 nnn'offÀ7ßýû3g;vàFDDDDmHEE²²²´ÎРT*‘žžŽÚÚZÚ >êºgÁµ] F´+%$$àâÅ‹(**ÒØ–¶b…BîÝ»ÃÑÑÑPÍ"ºcãq¬Á,Á|‹o¥ŽCDm€\.Ç¡C‡¤ŽAd’8Š’ˆˆˆ$W[[‹ßÿß}÷öîÝ‹ .ÀÁÁ÷Þ{/^ýuDDD ((Hê˜D¤#'''uápëî^‡F\\âââðÙgŸÁÌÌ ƒFdd$~øaÎBDDDF%33[¶lÁŽ;pìØ1Èd2„……aáÂ…ˆŒŒDŸ>}¤ŽHdÒÌÌÌЫW/ôêÕ O>ù$àÔ©Sؽ{7vïÞiÓ¦A{î¹&LÀäɓѵkW‰Sß77 6¸÷^à½÷€_”:‘Zý‚úEº|Ôüüüàââ"qkˆZž­­­ú8צ±ß¡3gÎ !!¡Éß¡ºþ‘&b"BŠ—ñ2öc¿ÔqˆÈÈÉåräç磴´RÇ!2),!"""ƒºxñ"6nÜˆØØX(•JâñÇGLL g!2:t@LL bbbP]]}ûö!66Ë–-ÃsÏ=‡aÆaêÔ©˜ŠQ£FA&“IWwÏ?> Lšœ< xxHˆˆˆˆˆÚÆf7P}äææj VWÍnŒñãÇsv¢fØØØè]0¢ú¹~ÁHýYvê?\]] Ù42ïâ]„! 
»±‘ˆ”:1¹\HOOG¯^½$NCdZXBDDD‡Õ«Wc÷îÝðòòBLL &OžŒþýûKˆ$daaÑ£GcôèÑøì³Ï°{÷nÄÆÆâé§ŸÆ /¼€Y³faÁ‚ê DDDD-¡²²ÿú׿°råJ$''cذaX³f zè!Þi‘¨±²²Â„ 0a©g=z4‚ƒƒ±páBLŸ>½m€ÉdÀÆ@H0u*˜›KŠˆˆˆˆŒ\s999êuU …aaaÏàää$aKˆÚ§æ F***••Õà÷VU0’žžŽÚÚZõ¶+ñôô„§§gÛº1…P„â<€EX„Ñ 3˜I‰ˆŒ”\.‡L&CZZ Bˆ Œ!DDDÔªvî܉7ß|üñFމíÛ·c„ °°àii²¶¶Æ<€xW¯^ņ °víZ|òÉ'˜:u*/^ ???©cQVSSƒýë_xã7››‹©S§âÛo¿EïÞ½¥ŽFDàââ‚Ù³gcöìÙHJJÂÊ•+1oÞ<üóŸÿÄÒ¥K1cÆ ˜{…«+  ¼ó°x±Ô‰ˆˆˆˆHb………Äë>.^¼ˆ¢¢"õºu >ÂÃÃáéé©<Þ½{w8::JØR5jâãã öú;Ý_koÏÔY[[7Y0RYY‰Ë—/7(QŒddd ¦¦@ã#ªÏÕ@^¢ú–c9z¡¶b+&c²ÔqˆÈHÙÙÙ¡S§NHKK“: ‘ÉáHL"""j)))xæ™g°gÏ„‡‡ãرc¸ûî»%É¢ËE+Õ4»­ÃûiMª÷òvÛÑïÁfjKûníýIù^ÖçîîŽ_|Ï?ÿ<¾ÿþ{,]º=zôÀܹsñæ›oò.dDDD¤·'N`îܹ8yò$bbbðúë¯K2 û'†Ç~HÛÚŸ¡ôîÝ7nÄ믿Ž÷ßsçÎÅêÕ«±víZ 4HêxM»ûn`Å à…€ðpÀØóÑ),,Ô:Ð[©TâÂ… (..V¯[¿àcΜ9êÞppp0Xîúý_¸»»£_¿~ˆŠŠÂ”)SZt¦¾ºûkëýÕl-ùú¦úÆ·»¿Æú‹wš¿­ûõ×_±fÍüç?ÿAMM 0þ|Ìœ9³UŠ-¬¬¬t*ÑöR¿`ÄÚÚÞÞÞZ‹E |}}afÆÙ!LQ 1Sñ*^E¢`…60Ó*IB.—³ „H,!""¢%„ÀêÕ«ñâ‹/¢W¯^8pà† "u, ‘õ/xò.'ºBÝû%e&Cﻵ÷gŒÿ¾fffˆŽŽFTT6oÞŒçŸ?ýô6oÞŒ¡C‡JˆˆˆÚ!Þ}÷]¼úê« ÃÉ“',y&öOÚ&öCÚ.]º`ÕªU˜?>æÍ›‡!C†`ñâÅXºt©qrùÇ?€øx`êT௿ÞÉ™ˆˆˆ¨ÍR|h+ú8þê}¤¤¤ ´´T½nS …¶¶¶¶äÎØÚÚB.—C.—ãá‡ÆçŸŽˆˆüõ×_pvv–:Q‹Ú´iz÷î àÖ •_}õÂÂÂ$)iŽ¥¥%¼¼¼àåå…ÏëS0bee­Å" …ݺuƒ¹¹¹¡›H-¤ºá <7ñ&fb&ì`}cÇŽ…££#1vìXœ>}ºÙ¶©¶¥ËrÕ²äädŒ3NNNpppÀ¸qãpöìÙÛ8sæ ÆŽ 8;;câĉÈÌÌÔšã—_~AXXlllàëë‹… jܱJ×÷`ܸqê÷`̘1:½Ñu{ºd×÷ýÐeßMk·ÓV]ŽŸS§N!""öööprrBdd$öîÝÛèvýýýÕ™Œa¦èر#~ùå,]ºsçÎÅÇ,u$"""2RµµµˆŽŽÆþýû±oß>ÌŸ?ß(ŠAÚsÿhþœS—óà–<—g?¤åû!MõšËp'Ç­!Èd2,X°ûöíï¿þŠI“&÷Ý…;uÖ­6n¶m“: ‘É*,,ÄñãDZ}ûv¬X±O<ñÆÀÁÁnnn0`¦NŠU«VáСC€ððp|ðÁˆÇÅ‹QUU…k×®!11Û¶mÃòåË1g΄‡‡#88¸Mƒh3gÎDDDàý÷ß×X~'}Ò¦ìß¿_£ï¡RZZ GGGdgg«—YXX¨×Û¿¿^ÙT¯»téþïÿþŽŽŽèܹ3¦M›†«W¯6›³©ïøtÙfc¯¯û\ݾqc}B]û­úä¯ÿ°±±Q¯s»ýs]ú¹º¶E—ïÑtÉ)„Pƒ¨¸»»£¢¢¢Ùö#UÁHHH¢££ñÒK/aݺuZ?»6oÞ¬.dËÉÉÁŽ;°páBŒ5 ~~~°³³ƒŸŸ† ‚I“&áå—_ÆçŸŽ„„(•Jua ¯ÅXŒR”b5VK…ˆŒT—.]½†MD­H‘^ ¶Š­RÇ v€ØºµõާuëÖµÚ¶ë{þùç…ƒƒƒ8uê”Áöy»š; F%:$nܸ!víÚ¥~MRR’8p ÆúµµµÂßß_üõ×_·µßÔÔTáãã#6lØ òòòD^^žøâ‹/„···HMM½íöh[@ôêÕK|ûí·¢  @½¯®]»Š´´4õz.\]ºtQgÊÏÏ›6mƒnt»ëÖ­eee"''G̘1CÌœ9S¯÷ÀÃÃC¬]»V\¹rEˆM›6 …B!.]º¤÷{ ÏötÉ®Ïû¡ï¾;Öôi«.ÇOJJŠ ñññ¢¬¬Lœ9sF :´Áöêþœ˜˜(|}}ERRR“™¤òöÛo KKKqøða©£h}ëADD-*11Q$&&¶Úö£££E´>¿ y¾nŠÞÿ}aee%Ž9"u”&µ§þ‰>眵©¥Ïå›k+û!ú÷Cë'èšáNŽ[C:tè°´´~ø¡ÔQš÷øãB¸º 
‘™)u""¢6A×¾Xk÷?©í¸víšHLLÛ¶mË—/sæÌááá¢gÏžÂÎÎN„•••P(",,LDGG‹—^zI¬[·NÄÇÇ‹‹/Šêêj©›bpÍõ5þüóOq×]w©n­ïÌT…“““Æ¿ÅW_}%¼½½ÅG}¤±nPPÆg€>Ùˆˆˆ±{÷nQRR"233ÅĉŬY³šmCcíÐg›½^ßýÝnŸ»©ïž„¢¢¢B 8P¬]»VÑ2ýó¦ú¹º´E—kw’óÙgŸãÆk¶-í•¶ÏÑûï¿_„„„{{{õ稥¥¥ðôô!!!Z?G«ªª¤n !–Š¥ÂE¸ˆ«âªÔQˆÈíܹS%%%RG!2C^?hl¼) BˆˆôÄ‚jIí¥ äòåËÂÊÊªÍ hÓe Ëo¿ýÖèóýúõÓ¤²k×.qß}÷Ýö~§N*V®\Ù`ù| ¦M›vÛÛmìlll¬Ö}Õ½ð9mÚ4­™¾üòËfß?!n]ÔsssÓ9ëÔ©SÅŠ+,ß°aƒX°`A³û«¿Ý;Ùž¶ìú¼ú컹cMmûÓåø™çœµ©¥ÏåUûké}iÛ®)ôCšê'è“áv[C[ºt©pwweeeRGiZi©B„‡ QS#u"""£Ç‚ª«²²Rdeei-øP(¢AÁGxx¸˜>}: >tÐ\Ÿ·´´TØÛÛ«n­ïÌTjkkEçÎ5n1räH/BCCÕËRSSEçÎEmmíme ~úé'eçÎ^^^Ͷ¡±vè³Í–(©OŸ>wsÛ›;w®˜-nܸ!uSLB‰(D'ñ²xYê(Dd„N:%ˆ³gÏJ…È`Œ¡ Döß'‰ˆHG2Ȱ[1 “¤ŽBí€L&ÃÖ­[1iRëOŸþ9æÌ™Ó*Û®ëã?ƲeË““KKËVßß’ÉdhêH&“¡¬¬ vvvZŸ_µjRRRð駟Ƈ§žz ãÆ»­ýzxxàØ±cèÖ­›Æò´´4„††"''ç¶¶«m¹L&CAAÜÝÝì+,,L=vc™rssáééÙäû×Üþõyòòò0bÄ$''ëµ/CmOÛû¡Ï¾›;Öî$[ýãÇÃÃ'Nœ€——W³ÛW*•Çúõë1bijIa÷îÝ;v,òòòЩS'©ãhR}¶oÛ&m"¢væøñã€VÙ¾êÜ|›>¿ u¾nŠvî܉ & ??¿Á¹¯±iOý}Î9kSKŸË7¶¬5öÕÞû!ÍõôÉp»Ç­¡åçç£sçÎØ¹s'ÆŽ+uœ¦?„†Ë— J†ˆˆÈ¨éÚkíþ'ŽR©DZZÒÓÓ‘‘‘ôôtõ#;;555[[[ÈårtëÖ ¾¾¾vëÖ žžž·¤íi®Ï[ZZ ”––h½ïÌêš5kðꫯ"++ <ðþüóOŒ9ëׯ‡\.LJ~ˆ¤¤$|õÕWê×é“M&“áÚµkpuuU/«¨¨€­­-jkk›Ì×X;ôÙ¦>}ãæž»í6µ½o¿ýo¼ñþüóO888h™þtK|ßÖÜ5 }sÊd2tëÖ QQQxá…àáá¡s>Ò”““ƒŒŒ õgxÝ?ÓÒÒP^^077‡——|}}ÕÕg¹\.‡B¡¸%íLJø‹±çq>ð‘:‘ÂÂB¸¹¹aïÞ½ˆˆˆ:‘AòúAcãM-Z}ÏDDDÔîedd@¡P´‰b]5uÁpÊ”)èÑ£Þÿ}äææ"--펆h½èé鉂‚‚ÛÞnc´ ˆóôôD~~~³™´-ËÏÏÇ+¯¼‚={ö 77Wýʼn>®^½ ___­ÏÙÚÚ¶ÚötÍ®Ïû¡o[ô¹8­O¶úÇOAA:tè Ó6ÇŒƒÒÒR”••ÝQ6CèÑ£ ==Ýø BˆˆˆH2—.]‚»»»Ñƒèª­ôOô9çl¬M-}.ßöCôÓ\?AŸ -}ܶ–Ž;ÂÍÍ RGi^H°t)ðÊ+À}÷}úHˆˆˆˆÈ(ÄÅÅaôèÑ –›››£OŸ>ˆŠŠÂðáÃ1hÐ tîÜY‚„¦-99~~~êŸ ñYdd$Ö­[‡W_}[¶lÁ”)S111ˆÅ+¯¼‚;v`îܹ¯Ó7[Ý °¶¶Ö©è¢)­±MmZ¢Ï­Í¹sçðÌ3Ï`ß¾}êb eúçõsõéƒ7wMãvr¦§§7¹MÒ§§'<==1hÐ ­Ïçååáèѣؿ?~ÿýw>|h°'·œ§ðVaÞÆÛøŸJ‡ˆŒˆ««+qéÒ%©£™„ÑóòòBff&jjj`nn.uœV×±cG„……!66gϞł “Én{{:t@NNNƒ ˆ999: ¦’Éd¨¨¨€µµµzÙµk×]?//¯Á—999èØ±£F¦ÜÜÜw¸¹zõjƒíMŸ>8xð |||`aa¡Î¥«: 99¹Á…ìÛ¥ëötÍ®ÏûÑÒmiŽ®Ç»»; š½[3,[¶ ¸ÿþûñóÏ?ãž{îiéØ-F©T||xç"""úOOO\»v %%%ptt”:N«2¦þ‰>çœMí¿%Ïå[b_-½½¶Úi®Ÿ O†–>n[Kqq1 ïè˜6¨W^€)S€ÄDà6 ›ˆˆˆˆÚ›‘#Gâ?ÿùz†ºw–ÿûï¿qâÄ 
¬\¹ÖÖÖê™@êÞQ^õðôô„™™™ÔÍiwÖ­[‡x@ýó~g¦‹ˆˆ<öØc(//Ç·ß~‹]»v¢¢¢0zôh<ùä“8zô(~üñG×"›±h‰>w}eeeˆŠŠÂŠ+ЫW/çZ³O«k[t¹¦aèïéjkk‘““£1ÃSÝYB222PQQ°´´D×®]|–Ëår„……IÜ’öÃ6XŠ¥˜‹¹xÏÂþRG""#âããÂ"co•ˆˆˆîØøñãQPP€íÛ·KÅ`fΜ‰O>ù?ÿü3f̘¡Ók»HŽ~ø¡ÁòüáááÍn×ÃÙ™™Ë~ÿý÷F×ß»w¯Ö}Õ½JDD~úé§ëíÛ·¯Á²Ã‡ãõ×_‡¯¯¯ú"ê7´î»±÷`ôèÑØ¿ƒå@ÿþýµ¾¦)ºnO×ìú¼-Ý–æèzü > ëœ:u ={ölðÚÉ“'cÀ€ؼy3|ðA¤¦¦¶xî–²fÍÜ}÷Ýmg€ÄðáÃagg‡õë×KÅ Œ¥¢Ï9gcZú\`?¤¥ú!ÍõôÍp;Ç­¡­_¿ööö>|¸ÔQtcf|ý5“s«8„ˆˆˆˆ`nnŽaÆaæÌ™xíµ×ðå—_â×_…R©Dee%®]»†ÄÄDlÚ´ >ú(är9rrrðã?â‰'žÀ!CàããøùùaÈ!˜4i^~ùe|þùçHHH€R©DuuµÔMmsÖ®]‹}ûöáÙgŸU/»ÓïÌtáêꊾ}ûâ“O>AçÎÕ³~¸¹¹ÁÃÃ~ø! ×"[kÒ§˜CŸ>·®ž|òIÜsÏ=˜5kVƒL­Ù§Õµ-º\ÓÐ7gkÌÞÒžâøñãØ¾};V¬X'žxãÇÇ€àèè 2<ò6n܈ääd¸¸¸`âĉX½z5âããqñâEܸqJ¥¿þú+¾üòK¼öÚk˜9s&† f7·4¤Y˜øã5¼&u"22]ºtaA‘¡ ""Ò ÄV±UêÔN[·¶Þñ´nݺVÛv}=ö˜pwwiiiÛçíjîH—S¤ŠŠ áææ&-Z¤ó~}||ÄáÇEee¥Ø»w¯ðññBqþüyáíí-¾øâ ‘——'®\¹"Ö¯_/¼½½Ejjj³Û1c†x衇ĥK—DII‰Ø³g=z´Öv¡¡¡â›o¾ê}uíÚUãßîâÅ‹¢K—.bÆ âÊ•+âêÕ«bëÖ­¢OŸ> ¶;zôhñøã‹´´4QQQ!RSSÅŒ3´î¿±÷ --MÜu×]bûöí¢  @\¿~]ìØ±Cx{{‹Ÿ~ú©Ù÷ þ¾tÝž®Ùõy?ôiË휎×®ÇORR’ðóóñññ¢´´Tœ:uJôïß_|úé§Mn?66Vøûû‹ÜÜ\½³¶¶/¾øB˜™™‰½{÷JE»èè[""jQ‰‰‰"11±Õ¶-¢ ôùmÈóuS´xñbaoo/Μ9#u”&µ§þÉížsÖÕÒçòMµ•ýÝéÒOÐ÷ý¼ãÖNŸ>-ìííÅÒ¥K¥Ž¢¿Í›…É„0Ö¾‘Ät틵vÿ“Ú†k×®‰ÄÄD±mÛ6±|ùr±`Á-BBB„ƒƒƒ KKKáéé)BBBDtt´x饗ĺuëD||¼¸xñ¢¨ªª’º)’¨Û—(//iiiâÛo¿#GŽÁÁÁâüùóëßéwfºöwÞzë-aoo/¾ùæåß|ó°´´o½õVƒ×è“­±ºækì;¾;Y·±¾±¶õõésë²ì³Ï>wÝu—¸qã†ÖõZº~;mÑ嚆>9CCCEXXX³ÙMIýÏÓ9sæˆððpѳgOakk«þ<µ²² …B„‡‡‹éÓ§7ø<­®®–º)TÇv±]È„Lœ'¤ŽBDFä±Ç£F’:‘ÁòúAcãMYBD¤'„PKjO!eee¢_¿~¢k×®âìÙ³Û¯¾T’šºhÚÜ:BQUU%|}}Evv¶ÎûÞ¶m›ËåÂÊÊJøûû‹ŸþYý\RR’3fŒ°··öööb̘1"))I§íæçç‹)S¦ˆŽ; {{{1~üx‘™™©µ DZZš¸ÿþû…£££°··‘‘‘"99¹ÁvOŸ>-"##…½½½pppâÌ™3 ¶›——'¦OŸ.:uê$¬¬¬D¯^½ÄÖ­[µî¿©÷àüùó"**J8;; {{{1pà@ñÃ?4ÛþÆþ½tÙž>Ùu}?tÝ·®Çš.mÕõø9|ø° 666¢K—.âwÞÑxÞÙÙYcûYYYûÜ´i“N9 á‹/¾æææÆ=8‹!DD­‚!¤«ÊÊJ&¼¼¼´žïƒöØ?iîœS—6µô¹<û!ÚsëÚѧŸ Ïûy;Ç­¡œ9sFxzzŠaƵÝ{?,D×®BK„ˆˆÈè° „ZRÝÎ+W®/½ô’º`ÄÑÑQãÜÙÕÕUkÁÈéÓ§EYY™ÔMiqõûÖÖÖÂËËKŒ7NlܸQܼySëën·OªO_çĉÂÁÁ¡Áû^ZZ*ìììÄÉ“'o;[»Öw IDATc9tͧm=}¶Ùغõµ­¯k¿U×ý[[[78ê¯Óýsmï­>}ðæ®iè“sРA"44´ÙüíEee¥ÈÊÊÒZð¡P(„………ÆgªàcΜ9âµ×^cÁGV+jÅ 1HŒc¥ŽBDFdÙ²e"((HêDc !²ÿ>IDD:’A†­ØŠI˜$ujd2¶nÝŠI“ZçxúüóÏ1gΜVÙ¶6………¸ÿþûqúôi|ú駘:uªÁömhÛ¶mÃÎ;ñõ×_KE/2™ŒÓS›VVV†… 
â‹/¾Àk¯½†eË–I©qªÏömÛ¤ÍADÔÎ?~Ò*ÛW›o3Àç·¡Ï×MÑõë×1nÜ8üý÷ߨ°a¢¢¢¤ŽÔ*Újÿ„L›±·Û·oÇìٳѧOüûßÿ†£££Ô‘nOAЫðÀÀgŸI†ˆˆÈ¨èÚkíþ'™†ÂÂB(•Jdgg#''J¥RýHMMÅõë×Õ뺺ºB¡Ph<<==áåå…   ØÛÛKØ"2e•••¸|ù²ÖÏ2¥R‰ŒŒ ÔÔÔ¬­­áíí­õ³L¡PÀ××fff·ˆZR0 £ð~Ãp —:7âé§ŸFYY™ÔQˆ Â×ojÑê{&"""“áêêŠýû÷ã­·ÞÂŒ3ðÕW_aåÊ•–:Z‹‘Éd8vìV¬Xõë×K‡È¤ìرÿøÇ?PXXˆØØX<üðÃRG""""#çää„„„¼øâ‹x衇pÿý÷ãÓO?E—.]¤ŽÖ"Ø?¡¶ÈXÛœœ¼ôÒKØ´i¦OŸŽuëÖÁÖÖVêX·¯C`Õ*`òd * 5JêDDDDD&ÉÕÕ!!! j¬`$!!/^DQQ‘ƶ`C5‹ˆÚUÁGclu >lllÔŸ= …áááŸGr¹2™Lâ‘!…##1/ãeÁÈÀ"S×¥Kܸq×®]ƒ›››ÔqˆL Bˆˆˆ¨EYZZbÙ²e1b,X€þýûcÖ¬Yxå•Wàëë+u¼1dÈÌ›7ýúõ“:Š^TÞ8Kµ5{÷îÅo¼#GŽ`úôéX±b<<<¤ŽEDDDm„µµ5V­Z…ˆˆ<ýôÓèÕ«.\ˆ… ÂÙÙYêxw¬­öOÈ´Óq[TT„?ü+W®DÇŽ±k×.DFFJ«e<üð­ çÌþþàA""""££KÁˆ¶ÁÙ P*•(,,ÔØ–¶b…B€€899ªYDdd***••¥þ ©ÿ¹’žžŽÚÚZÚ >ê~®°àƒ´YŽå¸wc'vb<ÆK‡ˆ$¦º)×¥K—XBd ,!""¢Vqï½÷âĉøòË/ñÎ;ïàË/¿Ä¤I“0þ| 4Hêx·­-R´åìdz***°mÛ6¬^½‰‰‰ˆŒŒÄÑ£Gq÷ÝwKˆˆˆÚ¨qãÆaĈX¹r%Þÿ}¬Y³O>ù$æÏŸOOO©ãÝžãS[d,Çmvv6>þøc|öÙgÉdxå•WðÌ3Ï´íYA´ùä 8X¼X¹Rê4DDDD¤'WWW¸ºº"88Xëóååå ŠE”J%:¤ð]w[ÚŠE üýýÛÅMˆLUý‚úEº|Ôý| Ò× Àƒx‹°ã0f0“:I¨nAHŸ>}$NCdXBDDD­ÆÜܳgÏÆ¬Y³°eˬZµ ƒFHH¦OŸŽèèh^P"" üñ¶nÝŠÍ›7£°°>ø Ö®]‹HˆˆˆÚ;;;,Z´óæÍÃêÕ«ñÉ'Ÿàý÷ßGTT¦M›†ˆˆXXð’)Q{U]]¸¸8lÚ´ ?üðÜÜÜðì³Ïâé§Ÿn¿ƒß<<€>yˆŠ†•:µ [[[õ@nmnÞ¼‰ììì#gΜABBÒÒÒÔEÛ®®®Z‹E üüüàââbȦQý.«Š>šú]®[ðÁßejMoãm#[°Ó0Mê8D$!{{{¸ººâÒ¥KRG!2üv“ˆˆˆZ……f̘3fàðáÃøüóÏñÚk¯áÙgŸÅ°aÇzîîîRG%" üý÷߈Ell,”J%0wî\Ì™3‡EcDDDÔ*œ±dɼøâ‹Ø²e 6lØ€ûï¿:uÂÃ?ŒiÓ¦aàÀRÇ$¢òÇà›o¾All,òóóŠÏ>û S¦LµµµÔñZߌÀ?³gý´·YPˆˆˆˆ¨Q666MŒ46«€R©DBBB“³ Ô}pV¢;ÓØl?u‹>TêÎöŒñãÇs¶2 ÝÑ30K°ш†5Làš 5ªk×®,!2 „‘A…††"447oÞÄîÝ»«¾çÈ‘#1nÜ8DDD {÷îRG%¢VRUU…Ç#..?ÿü3Μ9ƒ®]»bÒ¤Iˆ‰‰AHHˆÔ‰ˆˆÈDX[[ã‘GÁ#<¥R‰o¾ù›7oÆêÕ«„¨¨(DFFbРA077—:.騦¦GÅ®]»ðý÷ß#%%˜?>¦M›¹\.uDÃûôS 8X¶ X±Bê4DDDDd$¬­­õ*Q L×§`DU,"—Ë!“É Ù<"£QXX¨ñûSÿQXX¨^WUð¡P(¦ñ{''' [BÔ´7ð¾Å·Xõ˜‡yRÇ!" 
ùøø° „È€XBDDD’°±±Áĉ1qâD”––â—_~Á÷ßÅ‹ãé§ŸF·n݈ˆŒ9®®®RG&¢;’’‚øøxÄÅÅá·ß~Cii)üýý‰uëÖ!44”_‘¤ –,Y‚%K–àØ±cزe ¶lÙ‚·Þz nnn5jÆŽ‹1cÆ S§NRÇ%¢zòòò°gÏìÞ½qqq(,,„\.Çøñãñõ×_ãî»ï–:¢´¼¼€wßž|ø¿ÿBC¥NDDDDDm@s#•••¸|ùrƒbUÁHFFjjjh/Q‹( ŒP›VXX¨õ÷@©TâÂ… (..V¯[·à#<<\ã÷ 00¶„èÎxÃOâI¼703àG©#‘D¼¼¼ T*¥ŽAd2XBDDD’sppÀ”)S0eÊTWWãØ±cˆ‹‹C\\6nÜ0`† †ÁƒcðàÁððð855¦¶¶ÉÉÉ8räŽ9‚_ýpvvÆ}÷݇÷Þ{~DDDD$µ{î¹÷ÜsV­Z…sçÎa×®]سgæÌ™ƒªª*ôïß#FŒÀ!CŠ:H™ÈäàðáÃ8xð ~ûí7œ8q–––6l–,Y‚±cÇ"00Pê˜Æeölà»ïnýyâ`c#u"¢ÿgïÎãª*ð7Ž@vÙE\@Ü…ÒÔé×¢–æRfjæ’éhfe9•-j¥ÕT:N53šÅ¤-j3eÙÔ”8Sie¥¥æ.€Š,*«ì üþ¸sïp媨ÀayÞ½î‹Ë¹‡sŸ„÷ÀyÎWDDDš8''§ZFl$naÄÙÙ™öíÛÛ,‹DDD†½½}C…¹ða«ôqèÐ!òóó-ëž[ø˜9s¦åû9**ŠÖ­[¸'"õo XÍj^åU°Àè8"b€€¾ûî;£cˆ´*„ˆˆˆH£âààÀÀ8p ‹/&''‡-[¶ÏçŸÎ+¯¼Bee%aaa 0€þýûsÍ5×лwoôÒFĹ¹¹lß¾íÛ·óý÷ßóÃ?——GëÖ­‰eêÔ© :”~ýúéÿSir¢¢¢ˆŠŠbÞ¼y²e˾øâ ËñIUUQQQ 8ÐRéܹ³Ñ±EšC‡ñí·ß²mÛ6¾ûî;°³³#::šk¯½–§Ÿ~šn¸A'Ö\ˆüõ¯Ð³',] Ï)((Lß“~~~–ïŸê…ˆˆBCCõ·/‘Z˜Ç<^ã5–³œgÐ)DZ¢ÀÀ@rrr(--ÅÙÙÙè8"Íž^¡ŠˆˆH“âééÉСC:t(UUU$$$°}ûv6mÚÄúõëÉÏϧ¢¢{{{""",E‘ž={Ò£G:tè ‘Ò"QRRBBB{öìaÏž=ìÞ½›]»vqúôiBCCéÙ³'cÆŒáꫯ¦ÿþøûûœZDDD¤aùøøpË-·pË-·PVVÆŽ;øî»ïøùçŸùûßÿÎÒ¥K©¬¬ÄÛÛÛª Ò»wo"##qtt4x/DŒS^^Nbb"»ví²*€äææbooO—.]èÓ§¿ÿýï0`111899»é›3Þyî½þóÓä‘FÊÇLJ˜˜˜K*Œ¤¥¥±ÿ~8@QQ`:Ñ?$$ÄfYD'ú7}çû>HOO'!!ÂÂB æ÷ "õà /á–²”û¹ôwd‘–& €ªª*233 5:ŽH³§W°"""Ò¤•••±}ûv–/_ί¿þÊõ×_ϼyóèÛ·/;wîdÿþýìÛ·÷ߟ… RYYi¹BPtt4ݺu³ü‚¯{÷î½K" *''‡}ûö±ÿ~Ë/È÷íÛGbb"gÏžÅÑÑ‘Î;ÃàÁƒ‰ŽŽ¦_¿~*ˆˆˆˆØàääÄ€0`€eYYY‡bçÎìܹ“~ø¿üå/ãàà@hh(VÇ'={öÔë-iVòòò8|ø°åxÃ|ü±oß>JJJppp K—.ÄÄÄ0zôhbbbèÝ»·¦~Ö{{xí5èß6l€ñãN$""""rÙ.µ0b. 
ÄÇÇ_p2„ #Ëù&Ť¥¥]pRL·nݬ š#ÒpäAV°‚—y™?ð£ãˆH  ==]…‘ #i’rrrxíµ×xýõ×ÉÉÉaüøñ¼ýöÛôéÓDzNPP#GŽ´¼æÌˉ‰ûŒƒRZZ @Û¶m‰ŒŒ¤K—.„‡‡Ó¡CÂÃà #((H“E¤É)))!%%…””Ž=JJJ GŽáàÁƒŽ;FEEÎÎδoßÞòy‰‰±úZ¨ð!Òx´¦5ñó8s™K{ÚIDù‚¼'itf—ˆˆˆ4)ùùù,_¾œåË—ÓªU+î½÷^î¿ÿ~K³üB<<<èß¿?ýû÷·Z^YYÉÑ£G9xð ¥,rèÐ!¾ùæŽ;FYY`:‘+$$„:V£,„««k½ì·Èùdgg“žžÎ±cǬJæ·Õ®½¼¼ £cÇŽ 6Œ¹sçZNÆjÓ¦{!"""Ò²8::Ò«W/zõêeµ¼²²’””Ø¿?‰‰‰ìÝ»—O>ù„´´4ªªªðõõµ*ˆ˜ï‡††Œ‡‡‡»%Í\~~>©©©;vŒääd«òGrr2999ØÙÙYNÎéÒ¥ Æ #::š¨¨(ÂÂÂt¡…ÆäÅaãFxæxå£ÓˆˆˆˆˆâR #æ²HRRñññ:tˆüü|«mÙ*‹EëÖ­j·•²²2RSS­>ç>þQQÁ@œ“.ÿðÁÀ­¦¶è¸R¤i¹—{YÆ2^à^åU£ãˆHrqqÁÛÛ›ôôt££ˆ´*„ˆˆˆH“PXXÈêÕ«Yºt)ÅÅÅÜwß}<þøãure\{{{Ë T7ÝtSÇm NKKc÷îÝ$$$PXXhY×ÅÅ…   Ë/wm½ Õ ZrQÅÅŤ§§[~9nëmjjªÕÌßôìÙ“[o½µÆDDDD¤ñ²···¼~>|¸Õcæ“'Î=6Ù²e‹å5¢ÙùŽKª¿. Õ$8±ÈÉɱ:1çÜã#GŽ››kY¿ú±Gll,ãÆ³|ï¶ä“œš__X²æÌ)Sàœ’šˆˆˆˆˆÔ¾0rnÙ!>>žÃ‡“——gµ-[e‘ˆˆ"##qwwo¨ÝªSÕga«ôqôèQΞ= XOFDD0xð`ÚûûÓóË/ñ¯ñ\i) lšfص«Á{&"Wʰ€¹Ìeó'ÜèH"Ò€5!D¤è/~"""Ò¨óÚk¯ñÒK/QZZÊܹs™7oÞÞÞ –áB¿è­ªª"==ÔÔT2228q⤦¦’™™É×_MFF'Ož´\ÍÀÓÓÚ¶m‹ŸŸmÚ´¡mÛ¶øûûãççg¹™—©@Ò´UVVrúôi«ÛÉ“'­Þ?uê”Õ²’’ËÇ;;;Ó®];‚ƒƒ  k×®\ýõ´oßžÀÀ@Ú·oOppp¤DDDD¤qrrr²œ,aKnn®ezƒùåĉ¤¥¥ñõ×_“žžÎÉ“'-ëÛÙÙѶm[Ë1I»ví,Ç#mÛ¶% Àò˜ù­4æc óÛŒŒ Ëû'Ož$33ÓêñêÇ«þþþLPP±±±HHHÁÁÁøøø¸wR§fÌ€¸8¸ÿ~øæ°³3:‘ˆˆˆˆH“R›Âˆ­’D|| #FèXE¤ ›Ît^â%–²”7xÃè8"Ò€Ti *„ˆˆˆH£µiÓ&æÎËÉ“'™>}: , ]»vFDzbggGPPÐE'/”——“™™i)Šœ:uŠÌÌL²²²8}ú4™™™ìÝ»×R(**²úxGGG¼¼¼ðööÆËË «÷mÝ÷ððÀÛÛÜÜÜðôô¤U«Võùéh–Š‹‹)))!77—’’ ÈËË#''‡ÜÜ\òòò,oÏw¿ú•tÍ|}}-'ÖùùùBLLŒUÈ\iÛ¶­{.""""M‰··7ÞÞÞtïÞý¼ë”––’––f)ŠœnžîDÐo¿ý¶ÆDPsaäܲHDD:uºì ‡[ø8·ôQ›ÂGL«wq1M/œ4 þýoX±F†Îá¾ûà·¿ýnA¤ÉqÄ‘§yšéLç!’H£#‰HÑ„‘†£Bˆˆˆˆ4: <ôÐC|ùå—Œ;–—_~™ÐÐP£c]GGG‚ƒƒ ®ÕúEEE–±N:Evv¶UÁ ''‡¼¼>>¸¸¸àêꊓ“­[·ÀÝÝGGGìíí-¿H6¯XM.V:ñöö¶}ÕŸó(++£°°ð¼›ËçÞÏËË£²²’ŠŠ Μ9@aa!eee–«-åååQZZJAAgΜ¡´´”üü| )--µYä8wl•q:tèP£ÀS}ò‹ŸŸz)."""" ËÙÙ™ððpÂÃÃ/ºnEEEIvÕ æûÇŽcÏž=VËÊËËÏ»Ýê¥uooo\]]quuµºïììl)˜!ª/óòòÂÞÞÞj™««+...ç}ÞKhQý ­ç*))¡¸¸0»•––RYYI^^žÕ²ªª*Ë1…yYqq1ÅÅÅäææRTTd)ŸW/œ£££Í"NDD„e¹ù­yú‹¹ô¡ã¹ ¾}aÖ,øýïaäHhÀ©¬"""""-««ë'‚æçç“’’ÂÑ£GINNæèÑ£¤¤¤°sçN>üðCN矆ëÍàçëGXX:t°z@JJŠe[ÕßV¿(„ŸŸ:t C‡ôìÙ“‘#GnÙ–§§gýRìíað`Óm÷nøóŸáñÇaɸçxàhß¾þsˆH™Ä$^äEžåYÞá£ãˆH `Û¶mFÇiôW i4rrrX´hþóŸéÙ³'[·neàÀFÇ2„››¡¡¡—U„)//'77—3gΗ—GII 
………äçç[&\PRRb)?”””——Gvv6¥¥¥V'8Ù*W˜Ofj¬<<.©ôôtúõ뇛›[gi’¦N…7ß„‡†mÛ@¤iÔNp‚/ù’u¬;ï:®®®DGG7`ªzæì S¦˜nÛ¶Á‹/š¦…tì÷ß3f€. 'Ò(ÙaÇbs+·ò#?r5º…Hs@YYÙÙÙ´iÓÆè8"Íš.»-"""†ùàƒèÑ£{öì!>>žW_}Ue©weee€iÒ ˆü—¼ú*lß~ht¹ˆ5¬ÁoF2Òè(Æ46m‚ÄD>,€öíMCŽ7:ˆØ0ŠQô£‹Ylti€ébÁ"R¿T‘wòäIn¿ývÆϘ1cسg7ÜpƒÑ±DDDDDD¤…(--ÅÞÞ Ñ±Ò»7L˜óçËS"""""Ò8­a wqÎ8ÅX;Ê’?n*¸wìãÆ™ ï"Ò¨,f1ŸñßðÑQD¤žµk×€ÌÌLƒ“ˆ4*„ˆˆˆHƒÚ²e Ý»wççŸfóæÍ¬\¹£c‰ˆˆˆˆˆH RVV†³s ?aFä|–,´4XµÊè$"""""rÛØF LcšÑQ¶máÑGáÈx÷]8z®¹bcaÍ8{Öè„" e(×qOò¤ÑQD¤žùúúbooOVV–ÑQDš=BDDD¤ATUU±dÉnºé&®¿þzöìÙÃ7Þht,iJKKqrr2:†Hã÷ß‹C^žÑiDDDDDĆ8âèK_zÓÛè(“ÜqüðlÝ pÏ=Ð¥ ¼ø"äæP¤Å{–gÙÊV¶°Åè("Rìíí±T± IDATñööæôéÓFGiöT‘z—ŸŸÏرcY´hK–,aݺuš """""""†Ñ„‘‹xâ ¨ª‚?üÁè$"""""rŽB ù€4¤6 ‚  1FŒ€gŸ…`î\Ó1Ä q7ñ8SE•ÑqD¤ùùùiBˆHP!DDDDêÕ®]»èÛ·/?üð_}õ>ú(vvvFÇ‘LBD.ÂÛ,€åËáØ1£ÓˆˆˆˆˆH5Ø@)¥L`‚ÑQšŽŽaÅ HKƒgžM“CFŽ„o¿5:H‹´„%ì`Ÿñ™ÑQD¤µiÓF…‘ BˆˆˆˆÔ›õë×sÍ5ׯÏ?ÿÌÀŽ$""""""¢ !"µqÿý‹DDDDDDª‰#Ž[¹?üŒŽÒôxzš¦ƒ> ëÖAV–iŠHl,¬YF'i1bˆa£xœÇ©¤Òè8"ROTi*„ˆˆˆH½øãÿÈĉ¹÷Þ{ùâ‹/ð÷÷7:’ˆHýIKƒ9s`Ö¬ÿÝvî4ݪ/›3Ç´®ˆˆˆˆJBDjÁÉ ž}Þ~~ùÅè4"""""âÛØÆ4¦¥ist„;î€ï¾ƒ; :¦O‡ÐPS)>'Çè„"-Âs<Ç>öñ!EDê‰ !" C…©SUUU,Z´ˆyóæñÔSO±|ùrZµjet,‘úuü8üùÏðæ›gº?nº™ßóMÓ:ÇVDDD¤ÅÓ„‘Zš0úõƒÇ7:‰ˆˆˆˆˆãoĆ¥ùˆ‰1M9x¦L+LÅY³ 1Ñèt"ÍZwº3Žq,d¡¦„ˆ4S*„ˆ4 BDDD¤Î”••1iÒ$žþyÞyç-Zdt$‘†qõÕ gÏBy¹íÛÙ³dZWDDDD UVV¦ !"µag/¼_|›7FDDDD¤E«¤’µ¬ån _ 7ÿ;Ï=g:ŠŽ†‘#!>Þèt"ÍÖb“H"Ø`t©*„ˆ4 BDDD¤N0bÄ6mÚħŸ~ÊĉŽ$"ÒpììLWrt<ÿ:NN0mši]1Tii©&„ˆÔֵ׈0>Têj"""""Fù‚/8Îq&3Ùè(Í›‡Ì II°q#””À!з¯i’Hy¹Ñ Eš•.taXÈBÎrÖè8"RÇTi*„ˆˆˆÈ+..fĈìÙ³‡¯¾úŠ!C4¢XDZ I“.üG€²2˜0¡áòˆˆˆˆÈyiBˆÈ%záøõWøà£“ˆˆˆˆˆ´XqÄq-×I¤ÑQZ{{ÓtÍ›aÇèÞ¦O‡ÐPX´tr«HYÈB’Hb댎""u¬M›6äææRQQat‘fM…¹"eeeÜqÇìÞ½›Ï?ÿœ¾}ûIDÄ]»BTÔùŠ2ý±@DDDD § !"—¨[7SÁýé§á¬®Ö)""""ÒÐ²Éæ>aÓŒŽÒ2Åʦƒ;³fÁ«¯Bp°izüþýF§iò:Ó™;¹“Å,¦4.ÒœøùùQUUENNŽÑQDš5BDDDä²={–É“'³uëV¾üòKúôéct$cM™ŽŽ5—;:ÂÔ© ŸGDDDDlÒ„‘˰p!$%ÁúõF'iqÞáqd,cŽÒ²š¦ƒ= +VÀO?A0dlÚUUF'i²žæi’Iæ}Þ7:ŠˆÔ¡6mÚ¥ÉZ"õJ…¹,UUUÌœ9“O?ý”M›6qÕUWIDÄx'‚­Q§0~|Ãç›4!Dä2tîl:æY´ÈöqˆˆˆˆˆÔ›8âÏxÜq7:Џ»ÃÌ™°olÜhZ6jôé«VAq±±ùDš Ntb“x†g4%D¤Q!D¤a¨""""—eöìÙ¼÷Þ{lܸ‘k¯½Öè8""C‡ vvÿ[fgW]ááÆå+š"r™ž~’“á½÷ŒN"""""Òbìa»ØÅ4¦EÎeo#GÂæÍðóÏЯÌ aa¦2ýéÓF'iRžäIŽr”wxÇè("RGTi*„ˆˆˆÈ%{å•WX½z5ëׯgÈ!FÇi\¦L1ýÀÌÞÞ´LDDDD M¹L;šŽož}VSBDDDDDÈjVÓ…. 
`€ÑQäBúô•+M%úÙ³áµ× 8Øt µw¯ÑéDš„Žtd2“yŽç(§Üè8"Rœiݺµ !"õL…¹$[¶lá±ÇãÅ_dÔ¨QFÇi|Ư¹lìØ†Ï!""""ç¥ !"Wà‰' %Ö­3:‰ˆˆˆˆH³WFïó>Ó˜†vÿ1^@€i:ȉ°jìÜ =zÀ A°iTUP¤Q{š§9ÎqÖ²Öè("RGÚ´i£BˆH=S!DDDDj-%%… &0nÜ8~÷»ßGD¤qjÛþïÿ U+Ót뮃víŒN%""""ÕhBˆÈˆˆ€ `éR¨¬4:ˆˆˆˆH³ö1“Cwq—ÑQäR9;ÿo:ÈæÍàã£GCd$¬XEEF'i”:ЩLåž¡Œ2£ãˆHhÓ¦ ÙÙÙFÇiÖT‘Z)((`Ô¨QóÆoGD¤q›<ùWxš4ÉØ,""""Rƒ&„ˆ\¡ 1>ùÄè$"""""ÍZqÜÄM„bt¹\vv0x°i:È®]pýõðøã=fš$""VžäIÒIçmÞ6:ŠˆÔΜ9ct ‘fÍÁè"""ÒøUUU1}útÒÒÒøé§Ÿpss3:’ˆÈ%+(( ¼¼ü’?®¤¤„âââKúû¾}éЪGûô¡2)é’>ÞÕÕ—Kú'''Z·n}É'"""ÒÒhBˆÈêÚF2M ¹õV£Óˆˆˆˆˆ4K'8Á—|É:ÖEêJÏž°r%<û,ÄÅÁŸþË—›&‡<ò\}µÑ E…PB™Æ4žã9¦2'ta‘¦ÌÝÝ‚‚£cˆ4k*„ˆˆˆÈE­X±‚>úˆÍ›7nt©c¹¹¹TUUQQQa¹*Cqq1%%%äççsöìY*++ÉËËL'Ðýw”uõ¢ENNŽe»UUUäææZ=WõíšUÿ°]À0g4«þüfyyyTVV^ÚÎ×£÷ÿûöÎÞ½ ÍQ½½=^^^VËÜÜܬN†´³³ÃÛÛÛj[«÷]\\puu½à:æ÷qww¯ñü^^^ØÛÛÓªU+<==kl×ÃÛEDDD.…&„ˆÔ L'+mÞ C†FDDDD¤Ùy›·ñÆ›‘Œ4:ŠÔ5xôQxè!X¿^z úõƒaî\3þ{á1‘–ê žàoü8â˜Å,£ãˆÈЄ‘ú§Bˆˆˆˆ\о}ûX°`‹-âÿþïÿŒŽ#Ò,åææRYYInn.gÏž%??Ÿòòr ,Ås‘¢°°²²2K Ã\Ö¨¾ €3gÎPQQaUÊ(++£°°À²Ëu±úÍ<==iUíÖÕ?άC‡]Ç\0sppÀÃÃÃjwwwÏ›ÙV¢¶¼½½±³³»¤±ûo9eGµÏGmØ*ÒÔÖÅJ1æï«êÌß+fÕ‹AfçNW±µÎéÓ§­Ö1/›Õ¦Pt9ªOE©þ=`«|âíí½½=ÞÞÞ–Ò‰ùqgggÜÜÜ,åó÷µy›æïåêÛ‘¦«´´T…‘+uÕU¦"ÈóÏ«"""""RÖ²–ILÂM¸l¶œaÊÓmÛ6xñE?""à`Æ Ðdxi¡Báîa K¸›»õ³P¤ sww'55Õè"Íš !"""r^¥¥¥Lœ8‘¾}ûòè£GÄæ“Ø (..æÌ™3äççS\\Laa!¹¹¹STTDnn.EEE‘ŸŸ_£ÌQTTDiii2GmÔödõððpììì.iê‚ùc«&Ì϶O²©æi1Õ‹+æÿoàʦ٘‹6$%%]´tU+•´nÝ'''<==qssÃÍÍ oooË:ÞÞÞ´nÝ777<<<ðððÀÍÍÖ­[×(W‰ˆˆHÝ*++³š’&"—éÑGað`عbbŒN#""""Òlle+ $ð¾e¹4{ƒ™n‡Ák¯™¦2.\S§Â#@HˆÑ EÜ“<Éßøoñ³™mt¹Lš"RÿT‘ózì±Ç8zô(»ví²ºz¿HcV\\L~~>yyy–›¹¨Q\\Lnn.………–õΜ9Cqq1äååYÖËÉɱ:ý|.v¢w@@€e‚¹„Q›Éçžh.ÒT/ùûû˜ä“SlMá©í$ó6Ž;vÞâØ…˜føøøX~¶xyyáî«+üäíí§§'^^^xzzZJ`"""¢ !"uæÆ¡OX¶ Þ}×è4"""""ÍFqô¥/½émtih;Êðä“ðÖ[¦rÈ_þ·Þ óæAÿþF'i0A1é<ÇsÜÍݸ¢¿u‰4Eîîî– BŠHýP!DDDDlúÏþÊ+xûí· 3:Ž´æ“©Í… [·Ú<~>...øøøX®Þïããcy?00èèh«ÇÎwßü¾‡‡zI-Òyxx 3§úÏ´ê?ã.v?--}ûöÕx,//ÊÊJ›ÏUýçXõÛ¹?ãÎ÷x`` vvvõþ9©oš"R‡z¦O‡çŸ‡ÐP£Óˆˆˆˆˆ4y…òwþÎR–EŒÔ¶­i*ãÃÃÇÃ+¯À5ט¦3>ø Lœú;¥´ XÀ›ÿýï~î7:Žˆ\wwwM©gzU("""5”””0kÖ,FÍäÉ“Ž#MLQQYYYdgg[nYYY–eYYYäää——G~~¾å~^^6·Yý*÷æ+Ý{yyL·nÝðöö¶Z~îý†8¡[Där¹ººâêêJPPPl¯ªªŠÜÜ\«IIÕ''åçç“››Knn.ùùùdeeÕX'//Ïæ¶ÍSÌ?[Í÷}}}-·6mÚЦM«÷}}}qss«“ý© š"R‡î¼žxÂtÕÚ?üÁè4"""""MÞzÖSJ)˜`ti œœàŽ;L·mÛàO‚{îÅ‹aæL˜5 
¼½N)Ro ä·ü–¥,e:Ó5%D¤ òððP!D¤ž©""""5,Y²„ŒŒ þýïE T\\\£Ôqn¹ãÜDz³³mNèðññ±:9Ø××—ÀÀ@<==ñññ±Yâ0ß÷Ö/0ED.‰e²Ç•¨^*1—Eª—Frrr,ï§¥¥±wï^²³³9}ú4¹¹¹5¶çââbU±U ±õ˜‹‹Ë퇈ˆÈ¹*++©¨¨Ð„‘ºâè÷Ý/¼O> žžF'iÒâˆã6nÃ?££Hc3hév䈩òì³°d L›óæA‡F'©ó8«YͼÁƒ7:Š4f;ЦBH\,[fšÚ8|8<ö htB‘:@³˜Å ¼Àoù­¦„ˆ41”””P^^Ž£££ÑqDš%BDDDÄ¢ªªŠ™3gÒ³gOæÌ™ct©…üü|ÒÓÓ9uê§N:ïýŒŒŒWkwuuÅßߟÀÀ@Ú¶mKûö퉽`±ÃÁA/EDäâìííñóóÃÏïÒ®`WQQqÞòHVV–åß¶ƒZî[mÃÛÛ›€€Ú¶m‹¿¿¿å~Û¶m-ÿæ™ï{êêÕ""-Rii)€J„"uÉ×&O†?ÿ|ììŒN$""""Ò$ÅGA f°ÑQ¤)ðô4•òï¿>û žÞ4A$&Ætl6q"èï»ÒL<Ê£¬üïñÑqD主»PPP€ÁiDš'½â‹>úˆ­[·òã?ÒªU+£ã´h¹¹¹œ8q‚ÔÔTÒÓÓ9~ü8éé餦¦’‘‘a™êQRRbõqmÚ´±ºZz¯^½ð÷÷·:!Ö|ß|À%""ÒX888Xþݪ­‚‚2228yò¤¥iž„•™™É¯¿þj™„•••eõ±...–3&00ÐÐPiß¾=ÁÁÁxyyÕõ®Šˆˆ4!D¤ž<ð¬\ ññ0dˆÑiDDDDDšœJ*y‡w˜Æ4Z¡¿ÕÊ%hÕ FŽ4Ývî4M™>Ý4-dæLS9Ä××è”"W¤í˜Å,^äEf1KSBDš@…‘ú¤Bˆˆˆˆ¦+r?ùä“Lœ8‘ØØX£ã4[dffrìØ1KÁ#--­Fù£¨¨Èò1®®®DHH]ºt©q…ss D£ED¤¥qww§S§NtêÔé¢ë–——[Ê!çNÕÊÌÌdß¾}ÄÇÇ“ššj5yÄÍÍ !((Èòï²yY»ví4IKD¤‰Ð„‘z ¿ùiJˆ !"""""—ì_ü‹TR™ÊT££HSkÖÀâŦÒþŠðÊ+¦i!? QQF'¹ló™Ï_ù+oò&÷s¿ÑqD¤–̬=sæŒÁIDš/© """¬ZµŠ#GްiÓ&££4i™™™$''“œœLJJ 'Nœ°šî‘™™Iee¥eývíÚHpp0‘‘‘\ýõ–“Kƒƒƒiß¾½Úñ"""uÄÑÑÑRæèÝ»÷×ÍÎÎ&--ÍRÞ¬^âüõ×_-%3{{{hß¾½¥,Ò¾}{ÂÂÂ'<<ü’&ŸˆˆHýÑ„‘z4gŽé$£” 3:ˆˆˆˆH“G×r-¸øÅoD.*<^xžxÞz þøGX½†‡¹sað`£Š\²ø-¿e)K™Á \p1:’ˆÔ‚yBˆ !"õG…¡  €gžy†û;§QËË˳”=ÌÅê7ód‚ƒƒ &$$„ßüæ75ŠAAA:IDD¤‘òõõÅ××—îÝ»ŸwÒÒRKéóĉ¤§§[¦€íÚµ‹O?ý”ÔÔT***Ó¤s9ÄÖÍÓÓ³¡vOD¤EÓ„‘z4f ˜N2zî9£Óˆˆˆˆˆ4Yd±‰M¬d¥ÑQ¤¹ñð0@xþùOøÓŸLSûô{ï…)SÀE'ÕKÓññoG³™mt©ó„‚‚ƒ“ˆ4_*„ˆˆˆ«W¯¦  €'žxÂè(†+++#55•¤¤$«[ZZééé$%%YÖõññ!""‚ˆˆFŒa¹Ahh(z©%""Òœ9;;FØ®~]QQÁÉ“'-¯#Ì·ðÏþ“””Ëô0 ²z]Add¤å—¥""re4!D¤98ÀŒð׿ÂÓOƒþ?©•wyG¹ÛŽ"Í•½=ŒiºíÜ +VÀý÷ÃÂ…0k–©0Ò¦Ñ)E.*@îážçy¦3'ô»‘ÆNBDêŸÎRiá***øãÿÈŒ3ðóó3:NƒÉÉÉ!))‰}ûö±ÿ~ËýÄÄDΞ= €‹‹‹å„Ìnݺ1räH”)"""—ÄÁÁ   ‚‚‚ˆ‰‰©ñøùʨ;wîdÓ¦M¤§§[Ö ¤[·nDDDm¹Ñ»$"ÒäiBˆH=›1Ã4dÓ&¸]'³‰ˆˆˆˆÔFqŒg<îèïÒbb`ÍxñEX¹^}^xƃǃèh£Š\У<ÊjVó7þÆLfGD.ÂÉÉ '''M©G*„ˆˆˆ´p6l 55•xÀè(u®²²’””Ù¿?‰‰‰8p€••˜Zè‘‘‘tíÚ•I“&IDDáááxyy¼"""ÒÜ999]°Ô‘››KJJ Gޱ¼–ùùçŸyÿý÷-WÑñóó£k×®DEEItt4QQQtèÐ{{û†Ü‘&ABDêYp0 qq*„ˆˆˆˆˆÔÂ/üÂ.vñ¯EZšÀ@X´yÞ{–/‡îÝáÆáÁaݳ3:¥H !„p7w³„%ÜÍÝš"Ò8::R^^nt ‘fK…‘î•W^aìØ±tìØÑè(—­²²’dz{÷nöïßÏHLL$!!’’ÀtEí®]»Ò£GÆo9a2$$Äàô"WÆ®¿„­ªªºäíUÿ[ËŒbggg3GCe4òsaÄsWTTpß}÷±jÕª{N±æííMïÞ½éÝ»wÇŽ?NBB 
8p€„„6mÚDFF®®®DFFE×®]éÚµ+½{÷¦cÇŽ*ŠˆH‹¦ !" àž{àÎ;áøqÐï^DDDDD.(Ž8ºÐ… 0:Š´Tîî0s¦iâã?ÿ úŒ½zÁ}÷ÁäÉàêjtJ+ X@q¬e-Ó™nt¹***ŒŽ!Òl©"""Ò‚ýôÓOüüóϼþúëFG©µââböîÝË®]»,·={öPPP€ƒƒáááDGGsÓM7ñàƒMdd$ÞÞÞFG©UUUç-I@í #¶¶w±eMCe4òsaÄsÏŸ?Ÿk¯½¶AŸSDj/$$„† bµ<77×2!Í\yçwHJJâìÙ³xxxгgOz÷îM¯^½èÓ§Ý»wÇÅÅÅ =iXš"ÒF__X»,0:ˆˆˆˆH£UFïó>¿ãwØÑ¸ÿ#-€½=ŒiºíÚù Ì O=Ó¦™¦†R€PB™Â–²”©LÅA§ÂŠ4jš"R¿ô¯ ˆˆH ¶fͺtéBÿþýŽbÓÙ³gIHH`çΖÛO?ýDYYîîîDFF͸q㈉‰¡oß¾¸¹¹[D¤YHNNfçÎ,[¶Ìè("r‰¼½½éׯýúõ³Z^^^ÎÁƒ­^[½ýöÛЪU+"##‰‰‰!&&†AƒÑ»woZµjeÐ^ˆˆÔMiNN0q"¬^ ?ü""""""FÙÈFrÉe SŒŽ"b­woX¹žyÆT yí5øãaÜ8˜?ºw7:¡Oò$kXû¼ËT¦GD.@…‘úeot1Fyy9ëׯgêÔÆsPœŸŸÏgŸ}Æ‚ ¸þúëñññ¡{÷îÌž=›_~ù…þýû³víZV›ýÈËËãá‡&""¹ûî»ùñÇHII±úZ>|˜‚‚«eÕשí×v÷îÝ :”Ö­[ãééɰaÃøâ‹/λŸ"-‰££#ݺucÊ”)¬X±‚mÛ¶‘——Gbb"ï¾û.Æ #99™ ‹7Üp ,àóÏ?·ùóJD¤)*++ÃÎÎGGG££ˆ4oÓ§Cr2|óÑIDDDDD­8⸉›BS¤‘j×-‚'`Õ*عzô€Aƒ`Ó&¨ö·H‘†ÖÜÅ]<˳TPat¹**ôÿ©H}Q!DDD¤…úüóÏÉÊÊâ®»î2,Cvv6ü1óæÍ#66___n¹å6nÜHDD/¿ü2»ví"//o¾ù†—_~™qãÆÑ¹sgìíõ2FäBÎ=Ñÿðáà 6Œ±cÇräÈK1àæ›oæðá×¼ý‹mïºë®cÇŽxzzZÔÿãÿÀËË‹ 6X–UTTÅŽ;¸îºëÎûœærKUU•å¶zõj«uf̘ÁìÙ³IOOgÇŽ”••ñÈ#ÔÈ>dÈFŽIRR)))Lš4‰Ñ£G“ššzYŸ‹ÚnoôèÑL:•ììl¶oßNvv6<ð€Õ:Gޱ|n“’’8rä·ß~;&L¸ìç6îfÏžÍ#ãðáÖ‰aaaìÙ³‡«®ºŠªª*:uê„»»;•••têÔ‰]»vfÉ~î×¶¨¨¨Æ×öàÁƒŒ?žùóçsêÔ)¶oßNaa!7ß|ó÷U¤%³··§K—.Œ?ž—_~™­[·’——Ç/¿üÂK/½DXX7ndøðáøøøpÕUWñ»ßýŽO>ù‰>㦠IDAT„œœ£ã‹ˆ\–ÒÒR/XŽ‘:Уôéï¾kt‘Fé'ØÌf¦1Íè("çì S¦ÀÞ½°y3øøÀèÑ +V@Q‘Ñ ¥…z‚'8ÊQÖ±Îè("rš"R¿ìªªTÓ¹vرžõŒcœÑQ¤°³³cýúõŒW?ßO«V­bæÌ™6»çž{8xð Û¶m«—ç¶¥²²’_~ù…øøxâããùúë¯)//'""‚Áƒ3pà@n¸á‚ƒƒ,“Hsp¾Ùª¿ÔŸ4iW]usçεZgÙ²eüòË/¬]»Öj{ç&œ»¬6Û«ªª"007Ò¿Ìc=ÆÂ… ùöÛoS©aРA¤§§_ô¤<[Ùª?¶qãFFmY–œœÌ Aƒ8qâ„Uöž={2þ|«뭷ؽ{7+V¬¸¤ W²½œœ:uêd5ébòäÉÄÆÆÖøÜþíocÚ´i—ýÜvvvüç?ÿ¹`馺Î;óý÷ßãççwÑum퇇‡GÅ××ײ,99™ˆˆ«}èÛ·/qqqôêÕ 0_~ùe¶lÙb•ýܯmbb"7ÜpƒÕ×vâĉ 6Ìj²Ibb"QQQçý¾‘Ú9yò$?üðß~û-ñññüòË/ôéÓ‡Áƒ3bÄ Ð ¥Ý;wS/Û7¿6¯^^¬/z½."õçÍ7ßäá‡&??ßè("ÍßË/ÃÒ¥žn:yHDD¤¨í±X}Šˆ,a ËYÎ NàŒ^/K”˜þ3¼ñ¸»Ã=÷Àý÷ƒþÞ/ l*Sùžï9ÀZÑÊè8"bCTTwÝuO=õ”ÑQDê\Cþþà|ç›êÒÚ"""-T||bÒ¤I´k×ŽØØX^ýuÂÃÃY·nÙÙÙ9r„•+W2eÊ•AD.Sõ©¶N|çÖ[o­±ü¶Ûn#>>þ’Ÿ¯6Û³³³ãæ›o¶œØâÄ òòòö–[naóæÍ—üœW²=²³³­–mÞ¼ÙæçÖÖ”‹K}¾ú‚yªËÊÊÂËË«VëÚÚk®¹†Q£Fñ¯ýË2%&<<¼Æ÷çÔ©SY¹r¥åý×^{yóæÕxŽs¿¶aaa5¾¶ÿþ÷¿¹ñÆ­–EFFª "Rüýý9r$/¼ð;vìàÔ©Slذ>}úðî»ïò›ßü†€€¦L™ÂÇLII‰Ñ‘EDΫ´´'''£cˆ´ 
'B~>|þ¹ÑIDDDDDµ¬e2“U‘¦Ë<$%~÷;X»:v„qãàÇN'-ÈS>ÞjÙîÝ»‰ŽŽ¾ìmŠÈ¥óóócæÌ™lÛ¶””î½÷^Ö¬YC§N3f ?ýô“ÑE¤…Ó„‘6v,TUã‘–¨€þÁ?˜Æ4££ˆÔŸAƒLHL„[n LS$çÎ…cÇŒN'ÍPg:swð ÏhJˆH#äàà  !"õH…‘hïÞ½ôìÙóŠ·sðàAî¸ãú÷ïÏÉ“'yï½÷HIIaÑ¢E×AR©+ .dÙ²e¬^½š“'OrêÔ)Þ|óM–-[ÆÂ… ëu{ÇgÑ¢ELž<ÙjùwÞÉ‹/¾È°aÃjý¼íÛ·çûï¿§¼¼œ/¿ü’KξhÑ".\Èßÿþw²²²8sæ Ÿ~ú)wÞyçe}.j»½0þ|RRR(++ãðáÃÌž=Ûæö^yåÞzë-N:Evv66l`éÒ¥õ¾/ÕuéÒÅR訮¶û0cÆ öíÛGii)™™™¼üòË6 H#FŒ %%…;î¸77·ËÎüä“OòÌ3ÏOaa!{öìáž{îá°¬SQQ§§çe?‡ˆ\šÐÐP/^ÌÑ£Gy÷ÝwÉÈÈ _¿~Œ7ŽC‡ODZ¨²²2MiH>>0düãF'iÖ³žRJÏx££ˆÔ¿ÎMÓAÒÒà™gàáS'7¾ÿÞètÒÌ<ÅSàñ‘ÑQD䎎Ž*„ˆÔ#BDDDZ˜³gÏ’˜˜H·nÝ.{•ÿÏÞ}‡GUçíO*é„NBIB¯Ò4*E ,.U%®¢ðÊåAYA "MY,€¨´Ø@XH/ Bè-F’ïï<™%¤0©'$÷ëºæÊÌ™3ßsŸ2I&9ŸóÉÈ`òäÉ´lÙ’½{÷²råJ6mÚÄÀ‹­ëˆˆ8&«c†ÍfË·{F£FX½z5Ÿþ9!!!óÙgŸ±zõj6l˜ëxùMst<€¾}ûb³Ù¸ûM¿ë®»puu¥_¿~¯ïŒ3øÇ?þ··7£GfÖ¬YyfÌkzPPŸþ9K–,¡AƒÔ®]›I“&1sæLîºë®|—_”ñ-ZDrr2ûÛßðññaÀ€ôïß?Çx!!!¬^½šÏ>ûŒàà`êׯς øøã ½lG“«ÝqÇlÙ²%ÇtG×#** ºu놯¯/]ºt!99™?ü0ǘNNNøúúòØcåx® û¶U«V|ôÑGLœ8‘jÕªqçw‘­`娱c4nÜØ¡m "ÅÇÕÕ•Aƒ±yófV¬XÁîÝ»iÑ¢S¦L!#CWê‘Ò•’’¢!"¥íž{`Ý:¸tÉê$"""""–[ÈBîáªQÍê("¥ÇÏ/³;È¡Cðïgv ¹ùfèÐ-‚´4«J9МæÜ˽¼Â+ŒÕqDä*®®®¤é{½H‰QAˆˆˆHsèÐ!’’’ ]röìYúöíË”)S˜>}:Û¶m#,,¬˜SŠˆ£Œ1ÙnùiÕª«W¯&!!„„V¯^M«V­ò/¿iŽŽжm[âããst~ðòòâòåË´iÓÆáõˆˆ ::š””8@xxx¾óšÞ¨Q#>ûì3âââHHHà×_eÀ€×]~QÆ«Q£‹-âÔ©S¤¤¤ðçŸ2pàÀ\ÇkÑ¢ß|ó ÄÇdzvíZš7o^èeä8É2`À>úè£Ó];åË—söìYRRRˆŽŽfÆŒøøøäó‹/¾àÖ[o¥víÚ9ž+è¾íÒ¥ ?ýôIII9r„ñãÇg{~ùòåÜ{ï½m)ááálß¾×_É“'Ó¿Î;gu,©@Ô!DÄááž«W_wV›ÍFzz:¯¼ò AAA¸»»Ó¸qcÞ}÷Ýó®ZµŠÎ;ãåå…——;wæë¯¿.‰5)ûÙÏf63’‘×7¿ßo·oßN:uì'VþøãtêÔ‰J•*Ä‚ ²ÍÏ3ÏLLL =öC‡å·ß~+©5)’…,$@îàŽëÎ;vìXÞzë­\Ÿ›9s&ÿüç?qqqaß¾}Üwß}Œ7ŽÓ§O³lÙ2¦L™Â?ü`Ÿøðᤥ¥ÅÅ‹‰‰‰aìØ±Ìœ9³¸VM¤àBCaÙ2Ø·ÂÂ`òd¨W/³“È_Yî†WQ?[ßÄM„Æ+¼bu¹Š««« BDJ BDDD*˜óçÏãääDåÊ• üÚÿùŸÿáÈ‘#üðÃtèСÒ‰ˆHYòÚk¯ñã?–è2BCCéÚµ+mÛ¶-Ñådùàƒðòò*•e‰ÈõuìØ‘ï¿ÿžÃ‡3a«ãˆH¡!"0¾þ’“¯;kÍš5™2e 6ÄËË‹nݺ1sæLÞ|óMû<Ó¦McòäÉÜÿýT¯^5j0dÈ&MšÄÔ©SKrMDDDDD %tþÍ¿Áœ¹þE÷î»ï>öìÙî]»²M?wîŸþ9‘‘‘L™2…ñãÇ3xð`|}}éÔ©o¾ù&¯¿þºý5ëׯgâĉԯ_WWWªT©ÂwÞÉÚµk‹w%E £Axûm8~<³(dÅŠÌÎ!aaeuºVEþlý/ð+¿…Ž‘²ÂÕÕÕÞÙLDŠŸ BDDD*˜ .àëë‹“SÁ~ HNNfþüùL˜0   ’ '""eŠ‹‹ óæÍ+±ñ1¤¦¦fûóˆT<ÁÁÁüÏÿüóæÍ#%%Åê8"R¨CˆˆE €Ë—áÛo¯;ëˆ#rLëÒ¥ û÷ï·?Þºu+aaa9æ gË–-EŠ*""""RÖ²–XbÎp‡æwuuåÑGÍÑ%dþüù„……Q£F 
6oÞLxxx¶yºvíÊöíÛí›7oÎSO=űcÇŠ¸"%È×7³;HLLfQȹsг'tè‹N$.ŠüÙºèIO¦0Åê("òœU"R‚T"""RÁÄÅŪ;Hll, „††–@*‘OÏž=­Ž RnÜzë­ÄÇÇsôèQ«£ˆHššª‚+BûöðÕW×5·‹‘øûûsáÂûãS§NÙO€»ZÍš59yòd‘¢Šˆˆˆˆ”„…,¤+]iHC‡_É_|ÁÙ³gHOOç½÷Þã±Ç³ÏsôèQ7nŒÍf³ßªT©Â‰'ìó,Y²„3gÎаaCš5kÆÈ‘#Y¹r%Ƙâ[A‘ââä”ÙdóføýwhÞ|êÕƒ‰áüy«Þ*úgëxøl´:Šˆ6›Íê"åš BDDD*˜ôôtœ¯ß‚øZ5jÔÀÅÅ…}ûö•@*©ˆŠëÃÞ¼yóhÔ¨Q±ŒUPYT/Êë¥bÛµk·ß~;>>>E>žŠKYÉq­ÒÈTÐedddä˜V÷iE©«ÊÜÀöîÝ‹««+5kÖ´:ŠˆT)))¸¹¹YC¤bêÛ¾þúº³9ÒÙ¶fÍšœ>}:ÇôÓ§OS«V­BÅ))ç8Ç*V1’‘z]µjÕ¸çž{˜3g+W®¤V­ZtêÔÉ>¯¯/ÇǓ햞žnŸ'88˜åË—Ç’%KèÒ¥ S¦Lᡇ*ž))íÛgvÙ¿† ƒwÞúõáá‡aï^«Ó•iý³u7ºq+·ò/þeu‘§‚‘ ÆÓÓ“¤¤¤¿Î××—»ï¾›—_~™„„„H&R8ß|ó ýúõ³dÙºj’Õˆ#xä‘G8{ö,ëׯ·: ãº ¾ýöÛÓÊâ>­(n½õVÆou )„øøx^~ùe €ÕqD¤P‡ õí GÂîÝEª]»v¬Zµ*Çô/¿ü’víÚy|‘ⴘŸâʽÜ[à׎;–Ù³gsåÊfΜÉã?žíùÛn»•+W:4–»»;7Ýt‘‘‘¬^½š¥K—8ˆ%‚ƒaêTøë/xåX·Z´Èì$euºVyÿlý/°†5üÆoVG)Q*©`<<-ËÎ;éß¿?>>>øøøÐ§OvîÜ™cY_~ù%·Ür •*U"((ˆqãÆŸmž¬q³–a³Ù²µ¤vtY7n$==îÝ»gïZ¹MÏš¶{÷núô郯¯/ÞÞÞôïߟ={öäc×®]ôë×oooüüü0`@ž'—æ6Èk]‹c_^ï¸É«_¿~ö±úõë—m¬«×ï‡~`É’%Ù¦$W~ëwøðálãœëv+Žu½:ßÑ£G¹ë®»ðññ¡fÍš 2„sçÎå˜7-- l6Ûuÿ¨êèqåèò¡`ÇõµŠëX»xñ"ãÆ#$$„J•*Q»vmFŒÁ¯¿þšmYW/óÚ÷KA¶M^y¯·Œ¼ÖÿÚiŽîÓ‚î«â~ßÇ~qd(úþ)È{{È!¬X±"Ïí.eËO?ýDÛ¶m‰eݺuZID*u±³3ôî],!7ß|3ï¿ÿ>o½õõë×§~ýú¼õÖ[,\¸N:CX‘ⱕ­lc#Yè1ÆŽË«¯¾Ê¨Q£r\ä E‹|óÍ7,]º”àà`ªW¯Î”)Sxâ‰'ìóLš4‰+VЦM|||¸õÖ[IOOç“O>)t&K99evY¿¶l–-á±Ç2;‰Lœ¹ü¯ErªŸ­ŸçyV²’?ùÓê("""%LjˆH`0KÍR«cH9˜¥KKîxš3gNŽiQQQ0§OŸ.ô¸/^4÷ÝwŸqrr2æÐ¡CE‰)%lß¾}¦I“&fýúõæòåËf×®]æÖ[o5×þ*˜ž={šM›6™ÄÄDóÍ7ߨç9pà€©U«–™={¶9}ú´9{ö¬ùè£LHHˆ9zôhŽqæÌ™c._¾lNœ8a† f†ž#W^¿ŠdYO<ñ„¹óÎ;7·é€iÙ²¥ùä“OÌÙ³gÍ©S§Ì¼yóL½zõLLLŒ}¾ƒšºuëš ˜S§N™3gΘ>úÈtéÒ%ÏqKkäµ®EÝ—Ž70uêÔ±o—¬íh8`Œ1&--Í4hÐÀœ?>[ÆãÇ›*Uª˜„„„¯s~ë·cÇÓ±cÇlógdd˜† šm۶庽Šk]¯Î׫W/³zõjoŽ9b `FŒ‘ë¾rTAŽ+G–_Ðã:¯LE=Öîºë.3qâDsòäI“œœl¶nÝjn¹å–\¿GǶÉ+ïõ–‘×r™–ßëÙWÅý¾-®ýR}WÔýãè{{ëÖ­9~.HÙsäÈiœMxxxŽŸ×óûï¿›ßÿ½„Òa"""Jlü«åöûºˆ”¼:˜§Ÿ~Úê"×GãæfÌ¥KV'‘ ÊÑÏb%ýùSD*ŽÇÌc¦±il2LF¡ÇHLL4•*U2±±±Å˜L¤œ9~ܘ—^2¦JcÜÝ:Ô˜]»¬N%Ë0¦µim›ÁVG©ÐJóÿo"¥­4ÿ~×ù¦*) „Hq²¢ äÈ‘#0›6m*ÒØfÉ’%¦aƦR¥JfìØ±æàÁƒESJÆàÁƒÍ¢E‹²MÛ»wo®'ì~ÿý÷¹ŽñüÃL›6-Çô ˜1cÆä»üóçÏ›*Uªä˜ž×IÓYVÓ¦MÍ{ï½çиy¸½dÉ’Ó§OŸžídå!C†˜·Þz+Ç| .tèäï’ܹ)Ž}éÈqóü#×í2}út3dÈûãçž{μûî»ÙæyõÕWÍ?ÿùÏçºÞúcLÛ¶m³ þÍ7ߘÛo¿=Ïù‹s]³ò­X±"Çx9^_”ýüŽ+G–_Ôã:kYE=Ö¼½½Í¹sç²Í] 
‚kå·mò;v¬(qd_÷û¶¸ö‹£ûîZ…Ý?޼·OŸ>m7nœïòÅ:0cÆŒ1îîî¦Q£FfÙ²e&#£àÿWAˆˆUëÖ­Í‹/¾hu ‘ŠëÔ)cl6c¾þÚê$""RA© DDJSŠI1ÕL53ÕL-ô.\0/½ô’‰ŒŒ,Æd"åX|¼1sæÓ´iæçÏ=ŒùòKc ñ÷h)>1Ÿgãlöš½VG©°T"åYY(q*tk¹!Õ©S8P¤ql6ƒ b÷îݼñÆ,_¾œÆÓ¯_?¾úê+222Š)±Õwß}ÇwÜ‘mZ“&MÈü1»¼Ú¾FEE1hРÓû÷ïÏúõëó]¾¿¿?çÏŸw8¯£Ë:|ø0{÷î¥_¿~›=zä˜6`ÀÖ­[g¼~ýzî¾ûîóõéÓÇ¡e”Ô6ÈOQ÷¥#ÇMTTT®ÛeÀ€DEEÙ9’… f›çý÷ßgÔ¨QΕ%¿ÅÇgΜ9öÇï¾ûn¶¶è×*ÎuÍÒµk×lƒ‚‚8qâDž #¿ãÊ‘åõ¸ÎRÔc­K—.„‡‡³fÍÒÒÒÎõ{”£òÛ6e­½µ#ûª¸ß·Åµ_ »ï »yoûùùqNmàË”ôôt¾üòKúôéC“&MX±b3fÌ`×®]DDD`³Ù¬Ž("PJJ nnnVÇ©¸jÔ€fÍàÇ­N"""""Râ–³œ8âÊÐB½Þf³ÈŽ;xýõ׋9H9åí ‘‘°k¬[•*Ax8´isçBR’Õ ¥”EAC2iVG)*©`l6 6dß¾}Å2ž««+£G&&&†µk×âììLxx85kÖäá‡æ§Ÿ~*ÒI½RtgÏž¥ZµjÍëéé™ëôsç΄ÍfËv«U«‡¶ÏwæÌzè!êÔ©ƒ‹‹‹}¾‚ptYß|ó Íš5£~ýúÿZU«VÍ1­víÚœ9sÆþøìÙ³ÔªU+Ç|¹M+ÍmŸ¢îKGŽ›¼¶KíÚµ9{ö¬ýq£FðôôdÇŽlذ___ÚµkWèuÎkýî¿ÿ~–-[Fbb"ÑÑÑÄÄÄä[8TœëšÅßß?Ûcww÷"}/,èqåÈò r\秨ÇÚ§Ÿ~J§N=z4UªT¡k×®¼ýöÛ\¹rÅ¡åtÛäwìXÁ‘}UœïÛâÜ/ŽÌSœûÇ‘÷¶‹‹ /^ÌwH騲e cÇŽ¥^½zÜ}÷ݤ§§³dÉ:Ä£>Š«««ÕE¤KMMUAˆˆÕn»÷ ùL IDAT ¾ÿÞê"""""%n! éC(Ôë1\¾|™/¾ø__ßbN'RÎ99A°jüñtî cÇBPŒÇ[PJ‰3ÎŒg<‹YL 1VÇ)v*©€:vìȦM›ŠuL'''zôèÁªU«Ø»w/>ú(6làÖ[o%((ˆ§žzŠ~øÁá|¥øT­Z5×Ö ¢Zµjœ?cLŽ[bb¢}¾¡C‡âååÅO?ýDrr²}ž’XÖ7ß|“ëIþ6›”””lÓòëÎqêÔ©ÓNœ8AõêÕ³e:yòdŽùr» }inƒÂptlGŽ›jÕªåÚõâĉ9NJ9r$ ,`Á‚Ùºƒ$—#ªW¯Î-·ÜÂ’%K˜={6cÆŒÉ÷ôâ^×’PÇÕµ r\v|Gö©ŸŸ3fÌàСCDGG3zôh–,YÂàÁƒZNIl›²¦¸ß·Åµ_™§8÷#ïí´´4üüü 5¾Mjj*ßÿ=O>ù$õêÕ£C‡¬_¿žÈÈHöíÛÇúõ뉈ˆÀÅÅÅê¨""¤¤¤àîînu ‘Š­[7غââ¬N"""""RbŽqŒ(¢ÉH«£ˆH›60g> < @H ;wZNJÁ?øu¨Ãë¨Û’ˆˆ”?*©€ºwïÎÏ?ÿ\äËóÒ¸qc^~ùeöìÙömÛ2d«V­â¶Ûn£ZµjÜ{ï½Ì›7£G–Èò%»îÝ»•mÚöíÛiÞ¼¹ÃcôîÝ›~ø!Çô7fëò°yóf^~ùe‚‚‚ì'|æuœåu‚¾#ËJNNæûï¿Ïµ ¤V­Z9r$Û´ 6äº,€µk׿˜¶|ùrzõêeÜ«W/V¬X‘c¾o¿ý6Ç´ÒÚ…åèØŽ7=zôà‹/¾È1ÖòåËéÑ£G¶i¬\¹’3gΰfÍî¿ÿþBårÔðáÙ5k+W®dذaùÎ[ÜëZ r\9ª Çua8ºOm6±±±@f±Â Aƒøúë¯Y·n]¶×åõ~)ÎmSÐn>¥¥8߷Ź_™§¸Ýë½·/^¼˜kç')GŽaΜ9 0€jÕªqûí·óÍ7ß0lØ0¶oßÎîÝ»y饗hÔ¨‘ÕQED²Q‡‘2 {wÈÈ€b¾`‰ˆˆˆˆHYòP™Êô§¿ÕQD$KÍš0q"ÄÆÂܹ™+hÕ BC3;‰”³‹ŽÉ¹âÊÓ<Íû¼Ï1ŽYGDD¤x) f©Yju )'³tiÉOsæÌÉuúÑ£G `¾ýöÛ[vn¢££Íœ9sLDD„ñõõ5€ 1C‡5sæÌ1ÑÑÑ¥š§¢Ø±c‡iРY¿~½IHH0Û·o7íÚµ3ï½÷^¶ùòûÕ0&&Æ´jÕÊ|úé§æìÙ³æÒ¥KfÕªU&00ЬX±Â>_ï޽ͨQ£LLLŒIII10Æ Ëuì:uê˜Í›7›ÔÔT³víZS§N‡—µzõjãããcRSSsŒ;lØ0sß}÷™£Gšøøx³fÍÓ»wï\3ææ›o6ÿþ÷¿ÍÙ³gÍéÓ§ÍüùóM½zõLLLŒ}¾C‡™ºuëš ˜Ó§O›sçΙ¥K—š›nº)Ǹ¥µ 
òSûÒ‘ãfÿþý&00ÐÌ›7Ïœ:uʾýÍr,{äÈ‘¦W¯^æÁ,t®ë­_–””S¥JóüóÏçxíµ¯/îuÍ+_^Ç # r\9ºü‚×y)Žc 0½{÷6;wî4ÉÉÉæäÉ“æ¹çž3 È6^^ï—âØ6×[FAÖ¿ ³ÝWÅù¾-ÎýâÈ<ŹŒÉû½eëÖ­æÎ;ï¼î8R8Ç7Ë–-3‘‘‘¦yóæÆf³Ó£G3uêT³{÷îRÉñûï¿›ßÿ½Äƈˆ0%6þÕòú}]DJ–™?¾Õ1D¤eKcž|Òê""R9úY¬¤?ŠHù–a2LCÓÐŒ3㬎""ùÉÈ0fýzcî¼Ó›Í˜FŒyë-c._¶:™”€d“lM ¾7‹X 4ÿÿ&RÚJóïyoª‚‘RAˆ'« BŒ1¦aÆfüøñ%¶ìëIJJ2ëÖ­3/¼ð‚ 5îîî0AAAføðáföìÙfëÖ­æÊ•+–e,O6oÞln¹åS©R%S·n]óꫯf{>ë$ùÜN–ϲÿ~sï½÷???ãååe:vìh¾øâ‹lóœ:uÊ :ÔÔ¨Qø¹¹™–-[š¥K—æ:î²eËLpp°qss3 64+W®txY?þxޓųœ9sÆÜÿý¦zõêÆËËË„……™#GŽäš0111æÎ;ï4>>>ÆËËËôíÛ7×ZwîÜiúöík¼¼¼Œ···éÕ«—ÙµkWŽqKk䥸ö¥1×?nŒÉ<½OŸ>ÆËËËxyy™>}ú˜;väºÜ 6ÀüòË/…ÎåÈúcÌ•+WLPP9~üx®¯/©uÍ+[nÓ]c?® ²|c?®sS\ÇZTT”¹ûî»MÕªU››› 6ãÆ3—.]Ê6_^ï—Âl›¼òæ÷žÌoý ²Œü^Ÿßôâ|ß×~qdžâÜ?ÆäýÞÎ2þ|óöÛoçùzq\jjªÙºu«™={¶6l˜ 2€qww7·Þz«yñÅÍúõëMrrr©gSAˆˆ•»»»Y´h‘Õ1DäÑGéÜÙê""R© DDJÃæƒÁl3Û¬Ž""ŽÚ·Ï˜1cŒñð0¦zucž}Ö˜£G­N%Ål†™a<§9eNYE¤BQAˆ”ge¡ ÄöOŠˆˆƒlØXÊR2Ðê(RØl6–.]ÊÀ%s<Í;—ÈÈÈ\Ÿ{öÙgYºt)111Øl¶Y~A$%%ñË/¿ðã?²qãF~ûí7.]º„‡‡íÚµ£S§NtêÔ‰Ž;R&2‹u5jijÏ>ËC=T¤ql6úu¸|Z¶l_}õ‹-²:Šˆ£ë½·»wï΢E‹¨W¯^)'»±c8tè¿ýö¿þú+¿þú+üñIIIøúúÒ±cGºvíJ·nÝèÔ©–æÝ²e íÛ·/‘ñ³~7_¶lY‰Œµü~_‘’ãääÄ’%KJ쳸ˆ8hÑ"ˆŒ„‹ÁÝÝê4""R8úY¬¤?ŠHù6‚ìb¿ñ›ÕQD¤ NŸ†… áwàìY¸ë.xòIøÛ߬N&Å ‘D‚b£˜Â«ãˆT¥ùÿ7‘ÒVš?Èë|S—_²ˆˆˆ”IÆ ãµ×^cÓ¦M„††ZºwïN÷îÝíÓ¢££ù駟زe ¿þú+ï½÷)))øøøÐºukZ´hAóæÍiß¾=íÛ··üäD)=°:‚”Q6›_~ù…iÓ¦1þ|«ãˆH1qä½½xñb:tè bë¸rå û÷ïgË–-lÙ²…Ý»w³mÛ6Ξ=‹‹‹ 7¦}ûö 8ÐÐPÚ¶m‹“““Õ±EDŠMjj*ÆÜÜܬŽ"";CJ lß:YFDDDD¤Ø$Àç|Î4¦YED £F xöY7–,7ÞÈü {Ë-0v,Üs8;[R ÉOÆ0†7xƒgx?ü¬Ž$""Rd*© Z´hAëÖ­Y¼xq™(ÉMHH!!! 
6 Èì"²}ûv¶mÛÆ¶mÛøã?X¼x1‰‰‰¸ººÒ¼ysû­iÓ¦4mÚ”ÆëDÉUV—u )BCC=z4mÛ¶µ:Šˆ£ë½·7lØÀ{ï½Wʩʮ””öïßÏÞ½{Ù»w/{öìa×®]ìÙ³‡+W®àééI«V­hÓ¦ ÷Ýwmڴᦛn¢R¥JVG)Q©©©¸«ˆõ5‚ªUá—_T"""""åÊR–’Fƒlu) 776,óöÓO0m !!ðøãðÐCàåeuJ)„Çyœ7xƒ9Ìáž±:ŽˆˆH‘© DDD¤:t(S¦Lá7ÞÀÛÛÛê8×åááAçÎéܹ³}Zzz:û÷ï·‰ìÞ½›>ø€Ã‡“žžŽ³³3ÁÁÁö"‘&MšØïW®\Ùµ«©¤|Ò~)ŸyoÏ;·’”=.\`ß¾}ìÞ½;[ñGLLL¶ß…š5kFŸ>}?~¢££‰‰‰áðáÃìß¿Ÿµk×Kzz:ÞÞÞg»Ùïûøø”ö*‰ˆˆHOLLL®·Ã‡“dv?«S§Žý÷Š»îº‹ÿ÷ÿþ!!!4iÒ„ªU«Z¼&""76u)cZ·c`ÇèÚÕê4"""""E’N:‹YÌHF⌳ÕqD¤4¸¹ADDæí§Ÿàwà`âDxøáÌ›.€Pfg<ïó>ŸñçïVÇ)4„ˆˆˆTpuêÔáþûïçÕW_åþûï¯ÐWI­V­ÕªUã–[nÉñÜ•+W8räˆý¤Í¬8ùå–,Y©S§²“U$H½zõ¨]»6uêÔ!00€€|$""Rޤ¤¤püøqŽ;Fll,'NœàÈ‘#;vÌþ;ùsçìóתUËþ»Bxxx¶"ÓºuëâêêjáÚˆˆ”oê"RÆÔ©U«ª DDDDDÊ…5¬áÇÎp«£ˆˆBC3o‡Á¼yðê«0e Œ ãÆAPÕ å!„p/÷2•© b6lVG)„ˆˆˆ“&M¢iÓ¦¼ûî»<ñÄVÇ)“\]]iР 4ÈõùÄÄÄWúމ‰aóæÍ,[¶ŒS§NÙ;ŒÔ¨Qƒ€€€lE"uëÖÍV8âïï_Z«'"""y¸pႽÐãøñãÄÆÆrìØ1Ž?ÎÑ£G9qâ§OŸ¶ÏïììLÍš5©[·.tíÚ•áÇg+úððð°pDD*6u)ƒZµ‚?ÿ´:…ˆˆˆˆH‘-d!ÝèFrÿ¢ˆT ÀÔ©ðüó°p!̘ï¾ ýúÁرУ‡Õ å*Ïó--S§NÙO½údÒ}ûöñÃ?pôèQí¯ñðð°‰Ô­[—€€{ñHÍš5©^½:5kÖTለˆH!\¸pS§NqæÌN:eïðqu¡ÇÑ£GIJJ²¿ÆÓÓÓþ3¹N:´lÙÒ~?ëk­Zµpvv¶pÍDD$?ê"Rµn ¿þju ‘"9Ç9¾â+æ2×ê("RVøúf€<þ8|ýuf‘HϞо=Œƒƒ:†[î&n¢½˜Æ4„ˆˆÈ K!"""ÀóÏ?Ï¢E‹˜8q"ÿû¿ÿkuœrÇÅÅ…ÀÀ@ó/))‰'NpüøqNœ8Att´ýþ¦M›8qâýõW¶n#þþþÔ®]ò¼_§Nø$""åÖ… 8~ü8.\°ÿ<Íí~ll¬ý ñY²~–P¯^=:wîLHHˆ}ZíÚµ©]»66›Ú…‹ˆÜÈÔ!D¤ jÝ,€Œ pr²:ˆˆˆˆH¡|ÄG¸âÊ=Ücu)kœœ ,,ó¶e ¼ý6<ø Œ‘‘™Å!UªX²BÏxnã66±‰[¸Åê8"""¦‚2¯xýÊ+¯ðÀ0räHþö·¿Y©Bòðð $$„<çIKKãÌ™3œ9s†“'Orúôé÷7oÞÌÉ“'9{ö,ÉÉÉÙ^_µjUjÔ¨aï0’Õm¤FÔªU‹ªU«RµjUªT©B•*UT@"""–HMMåüùóœ;wÎþõĉlß¾WWWN:•­ÃÇùóç³½¾R¥J9~ÖµhÑ"ÇϽêÕ«S½zu\\ô'‘Š@BDÊ –-áòe8|òù{ˆˆˆˆˆHYö0˜Áxãmu)ËÚ·‡E‹àå—aÎxç˜>î¿ƃ¦M­NX!u§;7s3¯ñ+Yiu‘ÓÙ"""b7tèP>ùä†Îü‡‡‡Õ‘$...ö«”·nÝúºó'%%å{¥ô½{÷òÝwßqáÂN:EFFF¶×WªT ÿ|oWw"ñ÷÷§Zµj:ÁJDDìrûYt½ÛÉ“'1ÆdÇÍÍÔÔTÜÝÝ©W¯Íš5£K—.¹vÆR7É:„ˆ”Ag~=xP!""""rCÚ¶³Ù̶:ŠˆÜ(‚ƒaêTxáøä˜1æÏ‡~ý`ìXèÑÃê„Î3<ð“´¤¥ÕqDDD D!"""bg³Ù˜?>­ZµâÅ_dúôéVG’bàáᇇ´oß>ßySSSíWb¿úªìW?>wî±±±lß¾Ý>=111ÇX¾¾¾T©R…jÕªÙ»\ÝyÄÏÏ???|}}ñ÷÷·ß÷óóÓÉY""eLJJ /^äÒ¥K\¼x‘ .d{|íϬŸçÏŸçÒ¥K9ÆóôôÌñs! 
€V­Zåú3#ë±››»víâÓO?eÑ¢E|ùå—}:=ôááátëÖÍêHRŠÜÜÜìÝG "999ß’¬û;wî´?wñâEr¯R¥Jöâ???*W®l¿uáHÖýÊ•+S¹rålÓ+éc""@fwެÂK—.G\\\¶‚Žkï_¸p!Ûk’““sÛÛÛ??¿l…õêÕ£M›6T­Z5×¢Ž*Uªé{t‹-hÑ¢&L`óæÍ|úé§¼÷Þ{Lž<™.]ºÁàÁƒ©Q£F¡—!""åWjj*...899YED®Ö°af‡‘L*©,e)Oó46Ô±XD ÉÉ ÂÂ2o[·Â[oÁcÁ„ ðÏfÞ¯VÍê”åš Oó4ò “˜DAVGq˜ BDDD$‡‘#GòÅ_0räH¶lÙ‚¿¿¿Õ‘¤Œ«T©èu¹^iþÚûY'.?~œ½{÷f›'·+ÏCfqËÕE$ÞÞÞxzzâíí¯¯/žžžxzzR¹re<==ñðð råÊxyyááᯯ/>>>xzzâåå…ŸŸŸN‘—õ}1!!¤¤$âãã¹téIII\¾|™¸¸8IJJ"..ŽË—/Û >âããIJJ"!!¸¸8û÷ÑÔÔÔ\—•UDwí×üýýsßåÖÙÉÊï‹NNN„††Ê믿Κ5køä“Oxþùçyúé§éÝ»7<ðaaa¸¸èÏ""’)%%E EÊ¢F2;„ˆˆˆˆˆÜ`¾à âˆcC¬Ž""åE»v°hL›sæÀÌ™0u* Ï> -ZX°ÜÌ`&0éLg&3­Ž#""â0!"""¹úðÃiß¾=ƒ bõêÕ8;;[IÊ!'''üýý‹TtdŒ±_í>·‚’¬«á_¾|™ÄÄDâãã‰ÍóäêüTªT üýýíE$YÅ&øøød+6ñööÆÕÕ\\\ì'Oûûûãä䄟Ÿ...øøøàêꊷ·w¡·ƒˆ”Ž„„®\¹Â¥K—HOOçâÅ‹dddpáÂ{1GZZñññ¤¦¦rùòå<‹6.^¼HRR‰‰‰\¸pÄÄDRRRò]~åÊ•ñðð°´yyyáéé‰ö"¶¬ÎJ×{\ÝyÉf+?W«sss#<<œððpX±b‹/æ¾ûî£fÍšŒ1‚‡zˆ«£ŠˆˆÅRSSqss³:†ˆ\«Q#ø÷¿­N!""""R` YH_ú@Á.š&"r]µkÃĉðÔSðñÇ™]CZµ‚;î€1càÎ;¡ý¯§,pÅ•'y’gx†xZÔ²:’ˆˆˆCT""""¹ªR¥ K—.¥k×®L™2… &XI$W6›­ÈE%W»ºpäzWåÏš/>>žÄÄDΜ9C\\IIIö“¿ÓÓÓ‰‹‹ÃãÐòÝÝÝíÅ&•*UÂËË 777{Q‰¯¯/ÎÎÎT®\'''*W®Œ³³3¾¾¾ö¢›ÍFåʕ̓¤½¼¼ìcöíuu!Ч§§®–,eZJJ ‰‰‰À 3²ŠÂ{d¾—³:c\¸p€øøxÒÒÒìEqqqö¯ééé\ºt‰+W®`_Vrr²½hãzÅY² ¾²Þ›Y~~~öŽ5jØŸË*ึ[QV¡ÇÕÝŠäú¼½½2dC† áØ±c,^¼˜Ù³g3uêTn¾ùf† ÆÐ¡Cñðð°:ªˆˆX@BDʨ† !&22@Ý9EDDDäK,ßò-Ÿò©ÕQD¤<óö†ÈHxè!øî;xûm¸ë.hÜy$ó9ýÏ£Ø<ȃLf2ïò.¯ðŠÕqDDD¢‚ÉS§N˜>}:cÆŒ¡cÇŽôíÛ×êH"%ÎËË ///ªW¯^ìcç×A ëäó¤¤$’““í'³çu{LLLž'°gM/ЬîYL{‘ `ïv’uÂ;ü·ƒJ–¬â•,W§ä5OnR²Ša²\)KV7–keØTÖú•YÇLAåUÀu¼\-ë8Ë’u|^-«£ ó\{,f߀½`êêy²Š/®ÎTØõ¿ZÖq–_•‹‹ !!!öc:ë8Ë:n³Žï¬Â©ü:IÙȳÏ>ËÓO?Íwß}Çܹs=z4ãÇ'""‚Ç{ŒV­ZYSDDJ‘:„ˆ”QuêÀ•+pú4ÔÒÕ7EDDDäÆðP™Êô£ŸÕQD¤"pr‚=2oÛ¶ÁìÙ0~<üë_0rdf×u+**O<ÃÞà žæiüð³:’ˆˆÈu© DDDDò5zôhþóŸÿ0tèP6nÜH³fͬŽ$rÃÊ:Q¼jÕª¥¶ÌÜNÀ¿úû«;.d€ŸWÇ…«OöÏê¸puqÁ¹sçì®#·,Y²ÆÉruAÀ&·‚—k]ݵâFsuAP–k‹®- ‚ÿAö€€{OÖ8yu´É#¯Ž6YÅy0‰899Ñ£GzôèAll,ï¿ÿ>ï¿ÿ>óæÍ£k×®<öØc 0 [šˆˆ”Oê"RFf~=vL!""""rC0>äC†1 wô9SDJY›60gLš”Y2k¼õ O? 
ºV‘<Îã¼ÁÌaÏðŒÕqDDD®K!"""r]sçÎ¥gÏžôéÓ‡M›6Q§N«#‰ˆƒ<<<ì'èߨ âââ0ÆØ_]Ä’×<×›žŸÜÆwDV—ü899áçWð«È\]üà(›ÍFåÊ•Êpíøy½V¤<¨S§&LàÅ_dݺuÌ™3‡AƒQ¿~}ÆŽË<£ ‘ˆˆ”ê"RFe]ÁôØ1hßÞÚ,"""""ØÀrá ·:ŠˆTd5kÂĉðÜs°t)¼ö´n ·ÜÏ> wÞ ÿwá6qœ~Œb3˜ÁÆP ]„NDDÊ6„ˆˆˆÈuyzzòÕW_ѵkWz÷îMTTµk×¶:–ˆT*L‘’àääDŸ>}èÓ§ÑÑÑÌ;——^z‰ &0bÄž|òIêÕ«guL)f©©©ê"Ryx@•*pü¸ÕIDDDDD²…t 7q“ÕQDDÀ݆ ƒ¡CáÛoáí·á®» aC=FOO«SÞPžäIÞå]±ˆH"­Ž#""’/'«ˆˆˆÈÁßߟ5kÖ‘‘Ahh(´:’ˆˆˆH± aêÔ©9r„I“&±|ùr‚ƒƒ ã矶:žˆˆ£””u)«2;„ˆˆˆˆˆ”q $ð9Ÿ3’EÅv IDAT‘VGÉÎfƒ=`Õ*Ø»úöÍìcÇBl¬Õ oµ¨ÅP†2i¤‘fu‘|© DDDDÈÆ©V­¡¡¡üöÛoVG)6¾¾¾Œ;–ƒòÁK—.]¸í¶ÛXµjÆ«#ŠˆH©CˆH¨!""""rCXÂÒHc0ƒ­Ž""’·Æ3;…>œYòùçР ¿übuºÂxÆóñ9Ÿ[EDD$_*‘©V­ß~û-íÚµ£k׮̟?ßêH""""ÅÊÍÍ¡C‡òǰqãFªW¯Î€hÕªŸ~ú© CDDn`ê"R†U­ çÎYBDDDD亲 À«£ˆˆ\_ðì³ óæevéÜBCáÓO!=Ýê„eV!Ü˽¼Ê«ô¿!)»T""""æííͪU«xê©§ˆŒŒdÔ¨Q$&&ZKDDD¤Ø…††²lÙ2¶mÛF³fÍ4h:t૯¾²:šˆˆ‚:„ˆ”aþþgu ‘|íg?ÿá?Œd¤ÕQDD ÆÍ † ƒ;`ãF€Áƒ¡I“ÌN"—/[°LzžçÙÁÖ²Öê("""yRAˆˆˆˆг³3“'OfåÊ•|þùç´mÛ–ŸþÙêX""""%¢eË–|úé§üùçŸ4hЀððpÚ´içŸ~ju4)u)Ãüýá«Sˆˆˆˆˆäë}Þ'@nçv«£ˆˆ^h(,[–Ù-¤xþùÌ‘±cáȫӕ)7q½èÅT¦ZEDD$O*‘" ãÏ?ÿ$88˜ÐÐPž{î9’““­Ž%"""R"Z´hÁ²eËØ¾};7fРAtéÒ…U«VYMDD !"e˜ BDDDD¤ŒK#øˆxgœ­Ž#"Rt fv9~&M‚åË!8ÂÂ`óf«Ó•ãÏüÈ&6YEDD$W*‘" dõêÕÌš5‹Y³fÑ¢E )"""åZ«V­X¶l?ÿü3•+W&<<œnݺ±a룉ˆH>Ô!D¤ SAˆˆˆˆˆ”!—¹ÌyÎg›¶†5œàÃfQ*‘âç—ÙäàAX²Ξ…[n`Ñ"HK³:¡¥ºÓ›¹™×x-ÛôXbùÉ Ã¢d"""™T""""ÅÂf³ñðógÏ:uêDxx8aaa8pÀêh""""%¦S§N¬^½šM›6áêêJ·nݸûî»Ù¿¿ÕÑDD*¼íÛ·sß}÷1pà@ÈÃ?ÌöíÛÙ²e ãÇgüøñL™2…7ß|“¤¤$«ãŠHåÊpùr…?ÉDDDDDʆ§yššÔdøš¯I'…,¤;Ýi@«ã‰ˆ” 77ˆˆ€ÿü6n„xàhܦM«Ðrx†gXÅ*v²“?ù“a #˜`F0‚C²:žˆˆTp.V‘ò%00O>ù„ÈÈHüqZ´hÁƒ>È„ ¨]»¶ÕñDDDDJÄÍ7ßLTTQQQ<õÔS´lÙ’‘#G2yòdjÔ¨au<‘ éÌ™3|þùçöÇ®®®öéY/HOO'-->}úЬY3KrŠÈÿñôc ) ||¬N#""""\q¤“ÎW|Å VP•ª¤“Îsø û÷ïgâĉÌ;—   žzê)Nœ8au<‘áìį̀Q£8pà>ú(ãǧ]»vüøãVG©P\\\¸ûî»íArãììÌÃ?\Š©D$Oîî™_SS­Í!""""$‘”cZÖÉ¿ò'ò(5¨Áw|WÚÑDD¬áë cÇfv Y±Ο‡ž=¡}{X´®\±:a±û’/©O}îâ.¶± À^(˜ÅNqÊŠx"""v*‘W©R%žzê)>ÌË/¿Ì'Ÿ|BHH<ò{÷îµ:žˆˆˆH‰ðööfÊ”)ìÚµ‹zõêѽ{wÂÂÂ8räˆÕÑDD*Œ{ï½÷º]š\JiD$_ê"""""eH2Éy>—AC%*Ñ€¥˜JD¤ prÊì²iüþ;´h>õëÃĉ™…"åÄv¶K,C:é¹Îã„“:„ˆˆˆåT""""¥ÆËË‹'žx‚èèhÞ|óM¢¢¢hÞ¼9}ûöeíÚµc¬Ž("""Rì4hÀW_}Å—_~É®]»hÕªo¿ý6VG)÷zõê…‡‡G®Ï¹¸¸NµjÕJ9•ˆä*«Cˆ BDDDD¤ È­CÈÕlØø‚/¨OýRJ$"Reu9p† ƒwÞÉ, 
yøaسçú¯¿r}vì(ù¬…ð"/I$Nùœf›Nº:„ˆˆˆåT""""¥ÎÝÝþóŸìÛ·•+WråÊúôéC³fÍxíµ×8qâ„ÕEDDDŠ]XX»víâÑGå©§ž¢[·nìqä"""Rhîîîôïß—Ï¥¥¥ñàƒZJDr¥!""""R†¤ÿ拾™ÍmÜVJiDDʸ  ˜:þú ¦O‡„–-¡gOXµ*ï×-[³gCh(üç?¥×Q6lÌf6çï8ãœë:ubêÔ©ìß¿ßê˜""""EÌwß}Çĉÿ?{wÅ•õüÛì;4²ï4¸;îˆË؈ˆÑ n‰ÆÉ8øÎ$ÑÄ$bfÓÌ›¼ÑIÆÑdœŒ&f3‰J&n¸Ðqƒ£h4?ÀDdYe“};¿?œ®¡¡ª¡¡AÎçyú¡»úvÕ©ê¢úÞª{êâÏþ3æÎ‹¬¬,}‡Åc•¨¨(=¸(+‘H0yòdŒ=ZÏQ1ÆT(“b9!„1ÆcŒ MhžÁ¶°Å×ø–°ÔcTŒ16ˆyz;v¹¹@L pý:ÐܬZ†èÁcÝ:`çNýÄÙ˜à(Žb¦©ŒÒ‚”£\‘1Æê8!„1ÆcžD"AHH>øàÜ»w/^ÄŒ3°{÷nŒ9~~~ظq#’’’„>Œ1Æcƒ‰¡¡!¶lÙ‚«W¯¢ªª 'NÄ_|¡ï°c족xñb<8n``€èèh=GÄëD9BH»Ñ|cŒ1ÆÓeBˆÀ_á+xÀCÏQ1ÆØCÀÊ ¸wOsû_™òâ‹À–-ý›°À)œB‚TF )F±£bŒ16ÔqBcŒ1ÆCCCÌš5 »wïF~~>Î;‡ððp9r!!!ð÷÷ÇË/¿Œ . ¹ãÝ$cŒ1ƸqãÆáÊ•+xê©§°|ùr¬Y³µµµú‹1Æ=;;;„„„ŒŒŒ°lÙ2=GÄëD™Â#„0ÆcŒ± ÿ½Îx‡0Sõ cŒ=Dª«÷ßï<:ˆ:ý+ðì³@[[ßÇ¥KXâ$Nb$FÂ[ŠP¤ç¨cŒ eœÂcŒ±AËÐÐsçÎÅÛo¿ÜÜ\\¾|¿üå/qìØ1Ì;öööX¼x1öìÙƒ;wîè;\ÆcŒ1QÌḬ̀{÷nüûßÿF||<¦OŸŽ[·né;,Æô”I ?þ8lmmõ c¬“ÆÆMLôcŒ1Æcøï!oà ,Á=GÃc‘÷Þûï9€îï¾ ¬[´¶öm\Z’Bг8 ?øà„ÆcúÅ·YbŒ1ÆØCA"‘`Ú´i˜6mvìØÌÌL( ( üñijÏ> ___„††B.—#44vvvú›±>WUU…BÓ§OãôéÓ€°°0„……A.—sG8ÆÀ–.]Š   ¬X±Ó¦MÃG}„¥K—ê;,ưêëë‘››‹¼¼<äççwzž›› ‰D‚K—.aÑ¢Eððð€§§'¼¼¼Tž›››ë{Ušªªü•JõcŒ1ÆØC¬¾¾ €ÊÊJQWW¨®®Fkk+ZZZpÿþ}@MM š››ÑÖÖ†ªÿÔÙÚÏîß¿åˆo€Ê<¨|VÓgP__¯6ne ÚªªªB›–w•700€­­-ZKZaòoìøÝìÀŽn?gll +++µï™››ÃÌÌLxmddkkk•2¶¶¶00øï}}-,,`jjªqþfffBûUùYXZZ¬­­addCCCØØØ,--abb‰D"\'m?ÆëÇŽý7¹ÃÐðÁH¡D¦©Kúhk>üðÁÈ"ŸÞéFMMM¨­­Emm-šššPQQ!jZkk+ª««Uæ¨þƵÿýQþn*)˵9·Áð°!Ö¿¶Ï*žU‰­ãï¥:bŽÃG”¿U€êïCûy)ù`ccCCCáwBùžT*¦YYYÁØØXí4Æc'„0Æcì¡$“Éèèh477ã»ï¾Ã™3gpæÌ¼ÿþû000ÀŒ3 —Ë1{ölL›6Ov²‡B[[®_¿ŽÓ§OãÔ©S¸|ù2ˆS¦LÁÓO? 
8uê>øàH$LŸ> .DXX&Mš¤r±1Ƙþy{{#11üãñË_þ›7oÆë¯¿CCC}‡Æcýª©© ÈÏÏGNNòóó;%}”•• åÍÍÍáéé OOOxxx`êÔ©ððð@KK ŠŠŠ““ƒ¬¬,\¼xyyy*Ž:%‹xxxÀÛÛpww.¦2Æt¨ª 05}ð`Œ1Æ"ˆ•••*V•T•IÊ÷ª««ÑÒÒ‚ÊÊJ!aC™D¡ì°ZUU…––TUU [Õ%ch£}'Té’wMMMaaa¡¶ Ð9áÁÀÀ>>>Z%<´×¾ã«6Ú'Hˆ¥ÜÞwÝ…s3 ·‹;×UòIOfŠ‹‹»L˜©««Cãî°_QQ¡¶Œ¶ììì ‘H„À¶¶¶022‚­­­°-•ßµ2áÄÎÎFFF°±±ö åw«ì€Ü¾s±”ÀcGééh-.FMv6šîÞEka!ÚJJ`PZ ãòr˜TWì¦Æí“¿ü{ýü°ËÊ ÕÕÕ¨¯¯Žݱ³³ƒ±±1¬­­…cTûä¸ö¿[vvvÂo\ûß*åqOIå·é2ùíˆùý“¼Ø±LûÄÍö¿)íŠŠŠ„ß‘ŽI ÷ïßGss3*++»\®’T*…¹¹9lmmaccØÙÙ©¼¶±±^·ÏÖÖÆ ãÄÆëcœÂcŒ±‡ž±±1fÏžÙ³gãµ×^CYY Μ9ƒ>ø[·n…‰‰ ¦L™‚Y³f!$$³fÍâ‘Ø QZZŠóçÏC¡P >>wïÞ…““æÌ™ƒýû÷#""öööBù­[·¢¦¦çÎC||<öîÝ‹?þñpppÀ¼yó —Ëwww=®cŒ1%###lß¾#GŽÄï~÷;üðÃ8xð Ê±1Æ»ŠŠ dffâîÝ»(,,ìô<''­ÿ¹C ±±1àææ™L†Ù³g Ï]]]áææÖ©£‘˜å«[öÏ?ÿŒsçÎ!;;[¥³ŽT*UY^Çç^^^*ˆc"TU|.†1Æcœ2颺ºUUUBÂFUUîß¿ÚÚZÔÕÕ¡¢¢B¸ yMM ªªª„×ÕÕÕ*e»£ì€¯ì¤oggCCC•NúNNN055:«J¥ÒN[%‰J"‡²³~ûrÊeô4ƒ ÊÎÃíïz¯LLiW|eŒ29 øïH1’ÊÊÊÐÔÔ„ªª*´¶¶¢²²ÍÍͨ©©u'|àAkKKKX[[ÃÖÖVxmkk+tÖV&XZZÂÂÂB(«¼³}ûÉÜþfLÿêêêPRR‚²²2Ü»wOx”——wz]ZZŠ{÷î  ®®fm Oss¸š˜ÀÍÄÕÞÞXåì XXXÀÎÎNíÊ$Ãö£d0õ”I"ÊäRu£©TVV¢®®ÕÕÕ£¢¢¹¹¹*Ó”ulll0lØ0888`ذa°··Ç°aÄGû×ŽŽŽprr⛺2Ƙ¸FÌcŒ±!ÇÁÁ+V¬ÀŠ+………HJJBRR þú׿B"‘`Ô¨Q˜5k‚ƒƒ1wî\xyyé9rÆhmmÅ7‡øøxüðÃ000À´iÓ°aÃÈårLš4 ‰Dã<¬¬¬‰ÈÈH@ff& âââ°qãF¬_¿2™ ˆŒŒDHHˆÊºcŒõ¿_ýêW;v,–.]Š3f૯¾‚¿¿¿¾ÃbŒ±niJ¶ÈÌÌDff&òòòÐÜîní“-d2är¹J²…···ÎGJ’J¥J¥ ìr=Ô%­(ëÒí“VÚ¯‡¦Äm“V{èqBcŒ1ÆúACC*++QYY‰ªª*•¿*¯Õ=Wv¬W§cGööÞ===…ŽívvvB'UKKKáNäfff°µµ…¡¡¡p'sM#f0Ö}&ô(;WVV¢µµUUUhhh@]]*++QSS#t>Vv2V&Kݽ{·Sb•ònöš(ÿlmmagg§ö¹T*ÕX¦ýè6Œ±ÿjkkCII Š‹‹q÷î]”””àîÝ»(**ê4­ãÿ¨‘‘Q§Îþîîî7nœ |´I‚Gꦦ¦055ÕÙ¶W&¶O mŸÔ>i(77W%i¨ýÈXÀƒº•»»;œœœàææggg¸¸¸ÀÕÕNNNÂ{NNN|ž•16äqBcŒ1ƆglȨ®æ„ÆcŒ‰ÖÖÖ¦Ò¹°cGÃŽw(¿wï*++ÑØØØi^‰vvv:Œ»¸¸`äÈ‘j;˜·­À‚GÓ`¬e"“®;v+“²êêêT:kJàúé§Ÿ„çÊ„/"ê4_e2VûêÊ;ÔwœÖþÁÙ`WXXˆ¼¼<á‘““£òº¤¤Dåæ'æææBg|gggŒ7Nè¨ïââ'''888ÀÁÁ¡W×­Ùà&‘H„›ïh«ªªJ¨ÇuLF*((ÀÕ«WQXXˆ¢¢"Ô×× Ÿ344„““¼¼¼àéé OOOx{{ Ï===áââ¢ËÕdŒ±‡BcŒ1Æ:pttÄ’%K°dɆEþöÛoqùòe\¹r¯½öîÝ»###Œ;V%I$ @çwªeCSǤŒk×® I[¶lAdd$údÙæææËåËåT“QÞxã lÙ²EHF‘Ëå½NFaŒ1¦\¼x¿úÕ¯†÷Þ{kÖ¬ÑwXŒ±‡Pcc# 4Žî‘‘‘ÊÊJ¡¼™™™J2C`` ¢££…ĆáÇ?ÔõFccc¸¹¹ÁÍÍMcÒHWÛT¡PÏ•:nÓŽÏömʆ˜Š ÀÎNßQ0ÆcLOZ[[QRR"Üi\Ù °¸¸XcâGG–––6l˜Ðuذa?~¼Ðy[™ÌÑ1¹ƒëÔŒ ¶¶¶½N¾ª®®V›@RYY©r|ÉËËÃõë×…×êFÒ”(¢¼ƒ½££#ÜÝÝáèè'''¾†Ìú]II 
îܹƒôôtddd ''¹¹¹ÈÏÏG^^ž$)‘Hàââ"t¦†··7œáææ&$}ðï%ëkÊ㼟Ÿ_·e•£J)“EŠ‹‹…¤¦o¿ý‡BQQ‘hjjª’ âãã??? >þþþpttìëÕcŒ±>Å !Œ1Æcݰ±±Á¢E‹°hÑ"aZFF®^½Š+W®àêÕ«8pàêêê`ii‰   L™2'NÄøñã1jÔ(qµ‹u/33 …qqqP(hhh€L&CDD¶oߎ˜ššö{\¾¾¾Âè!­­­¸qãâââ÷Þ{†††˜6m"##!—Ë1iÒ$H$’~“1Ɔ333|þùçxõÕW±víZdffbÛ¶mú‹16ˆt5š…òuÇ fíG³ËåX½zµ  “Ét~çχ‘©©©°½4éjÔ•´´4¤§§£ººZ(ß>iD]âˆ,--ûcõë»w‡1Æ{è(“<Š‹‹QXXˆÒÒRܽ{ÅÅÅ*É¥¥¥ÂhzÀƒ9;;ÃÙÙYèdíïïßåûÍÌÌô¸¦Œ±ÁÀÆÆ666ðòòÒês ÝŽDTZZŠŸ~ú EEE(..FCCƒðy899ÁÑÑQè`¯ìlïèèWWW!‰ÄÉÉI׫ÍbEEEHOOÇ;w:=”çÌÌÌ “Éàãパ#GB.—ÃËËKHñðð€‰‰‰ž×„1í(ç£FÒX¦©© yyyÈÏÏï”uõêUdff ‰QÊD•ÇðáÃytÆØ À=cŒ1ÆzÀÏÏ~~~X±b ¥¥iii¸rå ®\¹‚³gÏâwÞASSLMM1fÌŒ?^åaÇw½òjjjpîÜ9ÄÇÇãÔ©SÈÍÍŰaÃð‹_ü»wïÆ#<}‡©ÂÐÐAAA ¶mÛPZZŠóçÏC¡P`÷îÝØ²e œ1{ölDDD 22’;2ÆX‘H$ضm±aÃTTT`×®]œ”ÇTTT¨M(P>ÏÉÉAkk+€#[888IÁÁÁF¢ðññž×jh077ï6i¤¢¢Bãw›””„ììlÔÕÕ å¥R©ÆQFd2¼¼¼øFLÿîÞÆŽÕwŒ1ÆÓBw펼¼<477«|F*• uQWWWŒ9Rx®ü«,Ãç8c…™™ÜÝÝáîî.ú3õõõ¨¨¨@aa¡pl¼{÷®0í»ï¾Caa!òóóÑÔÔ¤òYndzŽîÞ½‹ÔÔT¤¤¤ÓÒÒpÿþ}Î');°ÏŸ?ëׯ^{zzòo*’LLL„¾=ê´µµ!//¯S2Õ‰'pçÎ!±ÏÚÚ;v,0fÌÂolÂ@¸fÈcŒ1¦FFF7nƇuëÖxpÇÝ[·náæÍ›¸yó&nܸ'N ¬¬ àãã#$‡Œ7ð÷÷ç“w±¶¶6üðÃP(P(¸páÚÚÚ0a¬\¹r¹sçÎTû€££#¢¢¢…wß}WeýÖ­['¬Ÿ\.”ëÇcƒÁ3Ï<©Tеk×¢²²û÷ïçc-c¹®’Ôuºj߉@&“A.—«t"ðöö†¡¡¡׈iK*•B*•"00PcMó”#¶O RÎSÓ(#œIxQn IDATÄúEa!àêªï(cŒ1öÊúdNNòòò——‡‚‚áyaa¡ÐîH$pqq‡‡ÜÝÝ€ÐÐPa”AåHz^+Æë?æææ077‡››‚‚‚º,[VV†ââba4¥‚‚á.öׯ_G\\œÊH®ÆÆÆpuu…§§'<==áîî.<÷öö†ŸŸßœpºÿ>®_¿Ž””•䊊 €““ÆŒƒ©S§â駟ƈ#àïïwwwNú`LKðöö†··7æÏŸ¯ò!??wîÜÁíÛ·‘ššŠÔÔT?~¥¥¥{{{Œ3FH3f &Mš+++}¬clˆãÞŒ1Æc}ÄØØXHY½zµ0½¢¢©©©¸ví®]»†ÿûßxýõ×ÑÚÚ cccxzz" ÂßÑ£GÃÂÂBkÃzªýqqq(,,FÐxÿý÷ª4 „ÑCbbbTF@9xð vìØ!Œ€"—Ëä(Œ16X­Zµ ¶¶¶ˆŠŠB}}=>ÿüsN alª¯¯W›è¡|~ûömá΀êÝuÕîáëëËm‰!J*• õsuš››QZZªv»víâãã‘¶¶6€©©©Ð¡OÝÝI•Ïë‘– ´à}ˆ1ÆëWʤM¥öíŽÀÀ@Èår•z ,--õ¸&Œ16¸988ÀÁÁ¡Ë?455¡¬¬Lm;þòåË8zô¨ÊÍÚßø¡ãƒo20´´´àçŸú |ûí·¸qãZ[[akk  <<˜Î;‡ï¿ÿW¯^Å7ÐÔÔ[[[Lž<¡¡¡øýï)S¦ð÷Àئ®Ïð Þ}õêUáqüøqTWWÃÔÔ&LÀ”)S0mÚ4Ì›7îîîzŠž1ö°â^Œ1Æc€¹¹9&Nœˆ‰'ªLonnF^^žÊÝΜ9ƒ]»v¡¾¾@ç»»(G1b¬­­õ±:CNVV P(pæÌTUUA&“A.—#&&aaaÜé î„èèhÔ××ãÛo¿…B¡€B¡ÀÛo¿ Ìœ9r¹‘‘‘ÐwÈŒ16èÌž=§N¢E‹°bÅ 6;;ééé¸}û6ÒÓÓ‘žžŽØØXäää ­­ ‰>|8†???øúúB&“Á××R©Tk6¸uLf¸víšÌðÊ+¯àÑGÅèÑ£õæ€fnn¹\¹\@5©fûöíØ²e‹T#—Ë9©†1Æ´Œ¸¸8„‡‡ãÉ'ŸÄçŸCCC}‡ÅØ WQQ¡¶Ã»òyNNZ[[<¨Ç;88܃ƒƒ;%zøøøÀÀÀ@ÏkÅXï™››w›4RQQ¡ñ')) ÙÙÙ¨««Ê+o| 
n”™L///ëasç`l xxè;ÆclÀ«­­ÅµkלœŒ´´´N‰... ÄÔ©Sñ«_ýJ¸‰”½½½ž#gŒ1¦o...pqqÁÌ™3;½wïÞ=•›¦¥¥!..ÅÅÅþ›(ˆÀÀ@Lž<“&Mâ›:´ÓÐЀ„„œ>}çÎCZZŒ1uêT,_¾óæÍÃŒ3`nn®ïPcýÀÀÀ£F¨Q£°zõjúš\ºtIHÛ¿?ZZZˆyóæ!,, ¡¡¡055ÕsôŒ±Á†¯˜0ÆcŒ BFFFÂÝ]-Z¤ò^cc#222TE~þùg|ýõ×(((@[[ÀÎξ¾¾ÂC™(âëë ˜™™écÕ4Ú½{7rrr°aÃøøøôûò333¡P(…B††Èd2DDD`ûöí áFy/øúú £‡´¶¶âƈ‹‹C||<Þÿ}`Ú´iˆŒŒ„\.ǤI“øN9Œ1Ö…9sæ >>áááX¿~=Þ{ï=>n2Ö…®:«gff"//ÍÍÍBùöÕ•I¬í;«{{{s"cíH¥RH¥Rj,£)éJÙkŸt¥œ§¦QF8éjº}É$…0ÆcLÐÒÒ‚””\¹rEx¤¥¥¡µµNNN;v,'~0ÆÓ‰aÆaöìÙ˜={¶Êôމ"·n݉'PZZ ###`êÔ©Â#00pHÝÄ¡¶¶_ý5¾üòK|ýõר­­Å¤I“¿ýío˜5k¬¬¬ô&cl€077Çüùó1þ|@MM qîÜ9œ={{öìµµ5ÂÃñtéR,Z´ˆïc¢ ÚcŒ1ÆØajjŠ€€tz¯©© 999ÈÊÊBVV233‘••… .à£>½{÷‰®®®ðòò‚§§'<==áíí-¼öòò‚££c¿®×píÚ5ìÞ½=ö^~ùeLŸ>½Ï–WSSƒsçÎ!>>'OžD^^0oÞ<ìÞ½ááápwwï³åe††† BPP¶mÛ†ÒÒRœ? …o¿ý6¶lÙ'''Ì™3ˆˆˆà‹œŒ1¦ÆÜ¹sqìØ1DDDÀÑÑo¼ñ†¾CbL/êëëÕ&z(Ÿß¾}÷ïßÊK¥R•ÎåG÷ðõõ…………׈±‡“T*Úê477£´´Tíÿóµk×ììlá&¦¦¦pww×8ʈò9 ÒÓáÃõcŒ1¦w………¸pá‚üñÃ? ®®VVV BXXþüç?cêÔ©ðòòÒw¸Œ1ƆM‰"999* ‹‡BMM ,,,0iÒ$L™2Ó¦MÜ9sàâ⢧èûFmm-Ž=Š/¿ü§OŸFSSæÌ™ƒ7ÞxK–,áó Œ1Ѭ¬¬°hÑ"áF°8zô(Ž9‚+VÀÄÄ .Äã?Ž%K–ðµ Æ˜FœÂcŒ16„˜˜˜`øðᮡ“Euuµ,’œœäåå!11Ÿþ9ŠŠŠ„²æææðòòRIqss:Õ¸ººÂÉÉIgwFVGÜÖÖ†¸¸89rãÇǦM›°jÕª^ßi¦­­ ?üð  .\¸€¶¶6L˜0«V­BDDfΜÉw˜ÕGGGDEE!** ššŠøøx( üæ7¿Akk+&L˜¹\¹\Ž9sæÀ˜ï*Ëc€ÐÐP|øá‡X½z5ðâ‹/ê;$Ætª±±G÷ÈÈÈ@ee¥PÞÌÌL¥3x`` ¢££…:ìðáÃacc£Ç5bŒibll 777¸¹¹iLé꘠P(™™‰ŠŠ ¡|ÇcBÇç|LèGéé@p°¾£`Œ1Æú]EE¾ùæœ;wçÎí[·`llŒ1cÆ`Ú´ixúé§1uêTŒ=šG!dŒ16 x{{ÃÛÛ[¸~×ÚÚŠ´´4\½zW®\ÁùóçñÎ;ï ¥¥˜7oæÍ›‡ùóçÃÎÎNÏÑ÷Ìõë×±oß>Þyç,^¼ú1öpwwdzÏ>‹gŸ}¥¥¥8~ü8Ž9‚§Ÿ~Ï<ó V­Z…èèhL˜0Aß¡2ÆNaŒ1ÆcŒ?ãÇWû~CCòòò——‡ÜÜ\äää 77yyy¸té Tî®lhhggg•$‘öÏ•»½˜¥½xpgXHIIÁÚµkñ‡?üÏ>û,Ö¯_¯Õ Ä’’\¸p …'Nœ@QQœ±`Á|öÙgËåJ¥¢çÇúG`` ƒÚÚZ|÷Ýwˆ‹‹Ã¡C‡°cÇØÛÛcþüùËåX´h<==õ2cŒéÕªU«PTT„—^z nnnX¹r¥¾CbL”®FP¾.**è<€\.ÇêÕ«y4ƆSSSÈd2Èd2eº5(--­Ó¨Aí“FÔ%ŽøøøÀÒÒ²?Vïávç°v­¾£`Œ1ÆúEzz:Nœ8¯¾ú ‰‰‰ "Lš4 ‘‘‘عs'fÍš+++}‡9脆†"!!AoŸgŒ±¡ÎÐÐcÇŽÅØ±cñôÓOjjj””„³gÏâܹsø×¿þ„„„ <<>ú(üýýõy׈§N›o¾‰sçÎ! 
Û¶mnÂĆ®+°þæèèˆuëÖaݺu(--ÅðþûïãÝwßÅüùóñÒK/aáÂ…ú“16@pBcŒ1ÆÍÌ̬ËF ®®ùùù(..FAAŠŠŠ„¿éééHLLDAAª««…ÏÂÉÉIcÒȰaÃP__ßiY­­­€üü|üáÀ_þò¬[·/¾ø"¼¼¼:•oiiÁÍ›7‡øøx\¿~¦¦¦˜5kžþyÈårLš4 ‰¤×ÛŠˆpàÀüóŸÿDzz:êêê0bÄ„‡‡cÕªU3fL¯—1Xœ={ï¼ó.\¸€ÖÖV >Ï>û,žzê©^okKKKadÝ»w#33SååÅ_Äúõë!“ÉÈÈH„„„ÀÔÔTGkÆcƒÇ¦M›ŸŸ§Ÿ~2™ Ó¦MÓwHŒ¡¢¢Bm‡lå󜜡¾gll ¡vppp§DŸ~Í­}&==]åÂõ‹/¾ˆòòr|øá‡Â4…BÐÐPáµ2‰¥?âÓŲ”ó3/mÊj³ü¾Þfº¢m¬šÊ÷ÅvÔ´ü¾\ŽØù÷e›¡#ssón“F***4›’’’ºº:¡¼T*Õ8ʈL&ƒ——W¯Gµ|¨••åå@çcŒ±ÁîîÝ»8tè>ÿüs\»v öööX¸p!8€°°°Aqc"uõ2;;;Œ9›6m²eËôÕµµµéüó=©ßªõßþªÛkЧ?Úƒ¡½6Xâ(ô¹ßêÂ@þ¾û¶Õ–••.\(tX.//ÇéÓ§‡×_/¾ø"&OžŒ'žxË—/‡«««ž#VuùòelÞ¼‰‰‰Ɖ'¡ósb)—ëää„””8::ª}èÿ}LÛó‡ƒñ@u…ÔÔT<÷Üs¸zõ*jjjôíw¥ïzH.ûa?¾:::bÓ¦MØ´i’’’°cÇ<òÈ#˜2e þú׿bΜ9ú‘1¦g|‚1ÆcŒé”……FŒ#FtY®¡¡ååå(,,:×´ÿ{ýúõNw}îJkk+êêêðî»ïâwÞÁ¢E‹ð§?ý NNNB‚À™3gPUU™L¹\Ž˜˜,\¸ÖÖÖºZ}Á«¯¾Š«W¯bß¾}1bðÃ?`Ó¦Mxã7ÚêÌŸ?ãÆÃéÓ§1f̤§§ã™gžA^^þô§?étY2™ ÑÑÑˆŽŽF}}=¾ýö[áûçw`nnŽ™3g I$AAA:]>cŒ do½õîܹƒÇ{ ßÿ½ÚäIÆt¥«ÎÔ™™™ÈËËF}T;S+ëjí;S{{{w;¢\jjj‚““nß¾ÝéÂë™3gP^^޶¶6!AE.—£¢¢¾¾¾(..îóøˆH§V•óÓuY¦YmǾ^ŽØù÷g›A ©T ©TŠÀÀ@e4%µ)“ÔÛ'µ)ç©i”‘þNjpnÞ|ðwìXýÆÁcŒõÄÄDìÚµ LJµµ5–.]*tØHm1:¶3ZZZP\\Œo¾ùÏ=÷Š‹‹ñÜsÏé-¾o¾ùF¯ŸÔ×¹Ä#ÞoûÎPß¶öööX¹r%V®\‰––\¸pŸ}ö¶mÛ†—^z =öžþyÌš5K¯qVTT`Æ øôÓO!—Ë‘œœ< ®)*÷Ÿ§žz /½ô>þøcµïëã:ôPØ·uQWX»v-6oÞŒ“'O"11Qå&B}AŸßK/{(ìƒJ³fͬY³œœŒÍ›7cîܹX³f vïÞ ;;;}‡ÇÓ ¥žhŒ1¦Hp‡± ú½Ë {8H$>|¸Ïîš´oß>DGG÷ɼë/õõõP(xôÑG{<KKK̘1X¼x1|||t ÎÎÎøÿïÿÁÉÉIezZZ‡TBˆD"ÁÍ›71nÜ8aZFF‚ƒƒQTTÔoqáÌ™3ˆGBB*++…§r¹ ,€­­m¿ÅÃcúpÿþ}ÃÐÐIII°´´ÔwHZ»víôÙ8eÝ<66¶OæßÞ`­¯××׫MôP>¿}û6îß¿/”—J¥ïšïêê ___XXXèqz&** ‹/Æ“O>)L»yó&¶nÝŠêêjüïÿþ/‚ƒƒ…÷>ŒÃ‡ãÈ‘#ýŸ®/þj3?}.[ßt5BHOæÕS}½1ó(m]jnnFiii—ÇËììláî’¦¦¦pwwïòxéææ¦çµê#o½ìÜ Ü½«ïHcŒ bÛb½i^¼x›7oÆ÷ß3f`ãÆX¼x1ÌÌÌ´ž×@¢©n§P(ÌÌL=DÕwzRWV÷}µix„ÿ,q$ƒy› ôØz|úÐÐЀ£Gb÷îÝøþûï1}út¼ùæ›zI ¹zõ*–,Y‚¶¶6ìÝ»‘‘‘ýCW$ jkk1vìXìß¿sçÎíô¾¾ö/±Ë~˜þ´]ccc444ôkbð`Ø'ëòŠcÇŽá·¿ý-ŒŒŒpìØ±‘À¦N^c¬¿õõõëö4õ7¢·›bŒ1Æcƒ…¹¹9LLLD•mÇKKKÌ;_|ñÊËË‘€7öK2ÔÕÕ©M.è4\¼º;Uhš~óæM,X°–––°±±Á¢E‹púôi­Ë¤¤¤ <<ÖÖÖ°¶¶ÆÂ… ‘’’¢R¦ªª /¼ðd2ÌÌÌàêꊵk×âÊ•+Z•!"•Ž]0lØ0466ªÛt}ÆÅÅkÖ¬All,ÊÊÊœœŒèèh¤¦¦bÅŠpppÀäÉ“±mÛ6\»vmHž,bŒ=ü¬­­qüøq`ݺuú‡ @ÈÌÌDRR¾øâ ìØ±7nIJeË0yòdØÛÛÃÂÂ~~~ Åúõë±oß>dffB*•B.—ã­·Þ‰'œœŒªª*”——#55 Ø»w/¶mÛ†èèhÈårÊdÇÉ“'U¦=záááxôÑGqâÄ •÷Nž<‰ˆˆáõ‰' 
333øøøà…^PI¤ÄÕµzKLJ¹¹¹X²d lmmaee…ððpܺuKÔrÄÔ?ÕQÖ‰•õc‰D¢rüJIIÁ#<"Ì÷‘G5_HMMÅ#<+++ØØØ ,, iiijëâbâï.Vm× òòò°xñbX[[ÃÙÙO>ù$îÝ»×i^=ݾšˆÝ®bÚíùûû ëÚ¾SÇ@i3è’±±1ÜÜÜ„¨¨(lܸÛ·oGll,’’’‘‘ºº:ddd 11@tt´pÁH¡PàÕW_Ž¿îîî077Ž¿kÖ¬Á–-[°oß>ÄÅÅáÚµk¨®®ÖóZ÷Ð?'ê; ÆcL'***°lÙ2Ì™3¶¶¶øî»ïpéÒ%,_¾|Ð'ƒteêÔ©¸Û!¹S›sßÊib뿺ž§¦Ï·¯c}½}{ÂÖÖK–,Ann®Æ{ÓžêHL[±»øÅ´•ŸËÈÈÀÒ¥K!•JU¶UOÚ@‰iiiX¸p!lllºÜbÚ%ºØbÛ†Ýme̺¸ö¢‰6ß›Øÿ'm÷厺kêj»i³þí—«î›7cýé“O>! ò044žѨQ£èù矧ÌÌL}‡L+W®¤çŸžÊËË»-«©ZÞqúÏ?ÿL#Gޤ„„ª­­¥ÔÔT Q)'¦Lzz:¹¸¸Ð»ï¾K%%%TVVF ™LFyyyB¹Å‹Ó¶mÛ¨¨¨ˆèúõë¬2/1eÔÙ´i…‡‡w»múKii)ÅÆÆRtt4¹»»rrr¢¨¨(Ú»w/ݽ{Wß!2ƘN={– iÏž=úEkÉÉÉ”œœÜgóŠŠ¢¨¨¨>›{ý]_ojj¢‚‚JNN¦ØØXÚµkÅÄÄPTT“L&#‰D"Ô¯LMMI&“Qpp0EEEQLL íÚµ‹bcc)99™ ú5þ¦¸¸˜œœœ¨µµU˜6~üxÊÏϧŒŒ 5j”0½­­\]]©°°P˜€öîÝKµµµTXXHkÖ¬¡§žzJe=­k)ç/†˜8”åf̘A ÒÒR*..¦÷Þ{¼¼¼(++«Ëe‹­j».éééäááAû÷ï§ââb!&wwwJOOïržwîÜ!OOOá³eeetðàA3fL¯â×ö”{WåЂ èäÉ“tÿþ}ÊÍÍ¥¨¨(Z»vmãƒØí*¦íÑqþÉÉÉäããC?þøc·q ´6ƒ¾ÔÕÕQFF%$$ÐÇLÛ·o§èèhŠˆˆ    ²¶¶Vi'K¥R  ¹\NÑÑÑ´uëVÚ»w/%$$PJJ ÕÔÔè{•:=šèÐwŒ1Ɔ±m1mÛŸ·nÝ"???òôô¤¯¾úª§á hšê¯ 4räHÑåÕMWWÿ]²dI§ú¯6±i3OMŸW§c{¢´´”8@3fÌÐ8±í)1Ķ»koˆm †††Ò·ß~Kuuuôõ×_«Ì·'m 1cÆÐÁƒ©¬¬Lã¶Û.éí¶Ð¦mØÝöÐåµ—®¶ŸØïM̾¯í¾Ü‘6íÂÞn7m×_]´Ÿ»Z±ç(zzÞ¡7ûÎ`O4|øpúùçŸû|y?ýôÙÚÚÒêÕ«©¹¹¹Ï—×Sí¿ïU«VÑ_þòï‰ß¿Ä–ëíqC]Œícèɹ¾ääd²±±¡––aÚG}Dîîîô÷¿ÿ]¥ì¨Q£:ÕïzslÑŹ@m]ý.‹Y¦®»ÚìÚ.»7ëªé<¤Ø~üñGš2eŠÊgÛÚÚÈßߟnܸÑeúÒÜÜL«V­";;»~9nj«?¯¿1Ößúúúu{šú›>œ5BÆëCœÂt‰Bç­·Þ"‰D¢’"‘HÈÅÅ…–/_N *ñ‚ÚÚZzþùçÉÞÞž-ZD{öì¡üü|µeÅ^[¹r%}òÉ'*Ó~úé'•rbÊ<ñÄ´cÇŽNËÛ¿?mذAxmeeE÷îÝS)“™™©2/1e”rrrè“O>¡)S¦ƒƒݹs§S™"%%…¶oßNr¹œLLLÈÀÀ€‚‚‚(&&†¨©©Iß!2ÆX¯mݺ•LMMéêÕ«úE+œ¢Yyy9%''Ó‰'hïÞ½C«W¯&¹\N2™L%™ÖØØ˜\]])((ˆ¢¢¢hÆ ´}ûvŠ¥ÄÄDÊÈÈpõ«hêÔ©ôý÷ßQFF?^xo̘1tûöm"z°ßNž<¹Ëy•——“½½½Ê4mêZõ´3€º8”óûðõàj IDATÃ;MÿÛßþÖé"`Çe‹­j¢i]žxâ Úµk—Ú˜ž|òÉ.çùä“OªýìÁƒ{¿®BŽ;¦2-33“ÜÜÜzŸ˜ÄnW1möóOMM%áFÁÔfHÊËË)%%…hïÞ½´uëVŠŽŽ&¹\NdaaÑ)i$((ˆ"""(::š¶oßNü1%$$PFFFÿv|©«#24$úâ‹þ[&cŒ±!¯/Bª««iĈ4}út*..îMxZûº^KK Ð'Ÿ|B...ôñÇwY¾»éêê¿?ýôS§ú¯˜Øz2Om:yjjO|øá‡ç#¶=%†Ø¶¢6í“®Ú‚çÎÓø¹ž´:ÔizÇm!¶]ÒÛm¡MÛP9MÛC—×^Äêê{³ïk»/w¤M»°·ÛM®Ö_×ËRη«õ{Ž¢§çt¹ï 
EEE4mÚ45jÝ¿¿O—IAAAÔØØØ§Ëé­ößwIII§¤žî_bËõö¸¡.Æö1ôä\_[[9;;Ówß}'L›?>%$$ÐÌ™3…iéééäììLmmm]ƧͱEçµ=~tõ»,f™º>îj³Oh{žµ§ëÚÕyHmb˜8q¢JòÇ×_M¿øÅ/´Š©¿544ÐĉiñâÅú¥Na3NaŒ±AˆB˜.qBcšSll,­^½š¬¬¬YXXмyóè£>¢ŠŠ }‡(Jee%>>€¬¬¬(""‚öîÝK999ú‘1Æz¤µµ•BCCÉ××wÐü® Ý„®:ûÊd2266Öª³oû»¥±ž{õÕWiÛ¶mDô ‰ù•W^Þ{å•WèÍ7ß$"¢¿üå/B¹®t¬ÿiS×ên^½ý,***ê4=33“\]]»ü¼Øú§6ñt5ßÌÌLrqqéržš>[VVÖ«øuÒqÄÁ––’H$=ŽOL b·«˜¶‡rþ™™™$“ÉèìٳݖŒm†Á@›¤Áö¿#}ž4xù2@ôŸ:Æc¬?ôEBÈ›o¾IjëÌ“öõåÃÕÕ•4–;]]ý·¡¡¡Sý·«Øz3Om:yjª3öº=%†Ø¶¢.Ú'¨¶¶V§Ë(++ë4½ã¶Û.éí¶Ð¦m¨œ¦í¡Ëk/Úè;¯í¾Ü‘6íÂÞn·®æ-fš®–¥íz¨;GÑÓóºÞw‹ÂÂBrppun­§ŠŠŠH"‘ÐñãÇûlºÒqÿùàƒ(44Tãûb÷¯Þ–{ÜPcwós®ï©§ž¢×^{ˆˆòóó…›óüâ¿ ÌÌL"Ò.Sì±E¿µÚ?ºú]³Ìþ:—¨nŸÐö>^«˜ôáèÑ£$‘H¨¤¤Dß¡¨à„ö0ã„Æ„8!„é'„0¦Þ¡C‡„‹I¦¦¦ôòË/Ó­[·ôV¯ÕÔÔÐÒ¥KiÍš5*ÓÅ^344ìö®4bÊ©½p€ÌÍÍ…r•••ô /L&#kkk ¡]»v©ŒŽ!¦Lwë9ØÜºu‹^~ùe255¶›º;š1ÆØ`PRRBnnn´jÕ*}‡"ÚPLY°`ÆßîqãÆQtt4}ðÁ”˜˜HYYY<’U?JNN¦éÓ§Qpp0%%% ï}÷ÝwBDDÓ§OW§¤¤„~ýë_“»»{§NØíiS×êHLÝKl]ͯ¾¾žŒŒŒº,+¶þ©íºRCCƒ¨˜Ä~¶·ñë:!D×ñ‰™ŸØí*¦í¡œÿˆ#ÈÉɉâââ´Š…õŸ¦¦&ÊÊÊ¢ÄÄDúàƒ(::šÆ§qßZ°`n¼k‘½=JÅc¬õEBÈÒ¥Kiùòå½ kPèX_«¨¨ ?ü|}})++«Ûò]Mצl_ÌS›ÏkÓžèj>bÚ.êˆm+jZ®.Ú‚bß[^]{CL»¤·ÛBWß%‘n¯½¨£‹ïMlû¯»umÿy±íBMÄn7]¬_Ÿ ÒîEOÛõ½9O4Ø­Y³†yä‘>›bb" »wïöÙ2tEÝ~8wî\úì³ÏÔ¾/vÿ[®·Ç®Êõæ\ß¡C‡hÞ¼yDDô׿þ•vîÜIDDûöí£ÿû¿ÿ#¢Û©cÿœÞ[tñ;Ø›ãGO–©ëãî@9ϪüLwç!µ‰¡¤¤„† Fµµµ”‘‘A£Gîv„™   €Pbb¢¾CQÁ !ìa6B ÀcŒ1ÆØ†;w",, xóÍ7±|ùrlÞ¼ß|ó õbXZZâïÿ;Ž;¦2]"‘tZ§òòòNŸ6lÊÊʺ\†˜2(//=HWyÔÕÕ ålmm±sçNddd 33Ï<ó :„•+WjUæaÐØØˆo¾ù›7oÆòåËñæ›oÂÀÀ@e_eŒ±ÁÈÑÑ}ô<ˆÃ‡ë;¦ÁŽ;ðü111xâ‰'˜˜˜àÇľ}ûðë_ÿË–-Ãòå˱bÅ <ÿüóعs'bccqéÒ% µµUß«òЙ4iòòò––†ŸþÓ§OÞ›6m233qûömäåå!((HxoõêÕ°´´DRR„ºXG}]ׇ’º:jaa!»\ŽØú§¶PXX¨6&‡n?[TTÔiººi}¿®è:>±ÛULÛCiÛ¶møê«¯°nÝ:|ÿý÷ZÇÄt§µµ¸tébcc±sçN<ÿüóX±b–/_ŽeË–á׿þ5öíÛ‡ü&&&ðññAHHžxâ ÄÄÄàÿøvìØ¡›€.^fÍ ørcŒ±ÁÍÏÏ©©©C®Ýegg‡µk×bíڵغuk§÷Åžûl4µ'îÝ»§ñ3=mO©ÓÛ¶¢¶mA]+..î4­ã¶Û.éí¶Ð¦m(f^ººö¢N_|o=Ù—ÛÓ¦]ØU b¶›.Ö¿/Û÷½=GÑ—ûÎ`×ÚÚŠ›7oÂÃãϖáìì (((è³eô¥½{÷â÷¿ÿ=*++;½'vÿÒ¦\oŽ]éÍÿÑ‚ påÊÔ××ãàÁƒÂÿÅã?Ž#GŽ ¢¢—/_Æ‚ T>§ïßD wëÝÓåéò¸«Í>ÑçY»;©M ŽŽŽÆ¡C‡ðî»ïbÆ H$:‰³/)e...zŽ„1ÖŸø ;cŒi¡NâÁHÏ‘0ÆØÃÍÎÎ/¼ðN:…{÷î!!!‹-ÂÙ³g {{{„††bÇŽ¸víš¾ÃUK"‘ ;;»Ót###XYY©LsqqAnn®Ê´‹/vúìܹs¡P(T¦Ý¼yZ• Ãùóç;Í?11“&MRY‡üü|NŒ,_¾_}õΜ9£U¥þ>yÖ[™™™Ø·o–-['''Èår|ùå—˜>}:bccQ\\ŒS§Ná…^€¾ÃeŒ± Åúõëñ?ÿó?Â1 
,&LÀ3Ï<ƒíÛ·ãÓO?ÅÅ‹‘••…ÆÆF”——#99ÇǶmÛ0oÞX" )  éý¼cŒ1={ê©§‘‘?üáúE/~÷»ßáøñã’Äžû¨4Õ×5µ'¾ùæóêi{JS\bÚŠšâ×¶-Ø],ÚR×îê¸-ĶKz»-´ivG—×^ÔÑå÷¦Ô“}¹=mÚ…šˆÝnÚ¬¿¦ï[×íçözsŽ¢¯÷Áî•W^Azz:6nÜØgËðóóÃÈ‘#±gÏž>[F_1bÖ­[‡W^y¥Ó{b÷/±åz{Üúæ\ŸT*Å„ °gÏ8;; áíííáââ‚;wbòäÉ®éööت‹s{½YïžÐõqW›}¢/ÃJ݇Ô6†§žz {öìÁñãDZfÍÄØ×þñ`ôèÑÉdú…1ÖŸº\„1ƘR-ÕGꇕcL[Ð0„—®ˆöœ±Á¤°°>þøcŠŠŠ"©TJH&“Qtt4ÅÆÆRee¥¾C$¢ÿß³fÍ¢K—.Quu5ÕÖÖRrr2ÍŸ?Ÿ^}õU•²kÖ¬¡_þò—”——G÷ïß§S§NQXXX§aPüñGòó󣄄ª©©¡›7oÒ¤I“èŸÿü§Ve²²²hìØ±ôÅ_PYYUWWS\\¹»»Ó±cÇTÖ!,,ŒRRR¨¡¡ŠŠŠè•W^¡%K–hU†ˆhæÌ™¬“mÛWjkk)!!bbb( €¥¥%ÉårÚµkeggë;DÆë3÷ïß'??? ×w(Ýêë!wûsÈêþª¯———Srr2ÅÆÆÒ®]»(&&†¢¢¢(88˜d2C¢›˜˜««+QTTÅÄÄЮ]»(66–’““©   _bLŽ9BèÓO?íôÞñãÇ 9rDezXXýæ7¿¡¬¬,jll¤ôôtZ³fM§úŸØº–:bNýŠC9¿ñãÇÓáÇ©¬¬ŒJJJèý÷ß'///ÊÊÊêrÙb럚xxxÐ¥K—¨©©‰NŸ>MDDtûömrww§÷Þ{Š‹‹…˜ÜÝÝ)==½Ëyfdd§§'íß¿ŸJJJ¨¬¬Œ>ÿüs îUüšbÕvÝÔmGMÓ{»};ÎOìvÓöP7ÿC‡‘¿¿?©L m}«««£ŒŒ JHH ?þ˜¶oßNÑÑÑ$—Ë) €¬­­…ã9’J¥@r¹œ¢££iëÖ­´wï^JHH ŒŒ jjjÒï ýô@tù²~ã`Œ16äˆm‹iÛþüä“OÈÈȈV®\IUUU= o@ëªñä“OÒ[o½¥2Mì¹ï®æ-¶[Koç©nš¦úzÇöĽ{÷èðáÃ4~üø^µ§š››ÉÚÚZÔºŠi+jŠ_Û¶`W´m ™3gÒgŸ}Öå¶Û.éí¶Ð¦mØÝöÐåµutñ½uœ®í¾ÜQOÛ…í‰ÝnÚ¬¿¦ï[×íçö´9GÑÓv}oÎ F•••´|ùr222R{ÞM×þýï“D"¡Ï>û¬Ï—ÕšöÃÆÆF3fL÷/±åz{Ü ê›s}DD¯¿þ:YZZvú?ûì3266¦×_½Ógz{líÉï`G½9~ˆÑÓ}Bì¶ÑfŸÐæ8¬‹uUwRÛß‚ÆÆF²··§ßÿþ÷ZÇ£Ÿ|ò I$’N×$‚þ¼þÆXëëë×íiêoÊ !Œ1¦… ª è4Öw(ì!Á !ŒõNKK %''ÓöíÛ)88˜ ÈÈȈ‚‚‚hëÖ­”œœLmmmz‰í»ï¾£ßüæ7äïïO&&&dnnN'N¤wÞy§SL¥¥¥´jÕ*rtt$KKKŠŒŒ¤ÜÜ\¡O{—.]¢àà`233#OOOzã7:-[L™Û·oÓã?N¶¶¶diiIS¦LétR@¡PÐc=FÆ #òõõ¥^xª««µ*CD4}útš9s¦ÖÛ±¯eddЮ]»H.—“©©) €€Š‰‰¡„„jllÔwˆŒ1ÖoÉÀÀ€<¨ïPºÄ !º×ÐÐ@”˜˜H±±±´}ûvÚ°aEEEQPP¹ººªt0633#™LFr¹œV¯^M111´wï^:qâ%''?´Ÿ4¹ÿ>™››SYYY§÷êêêÈÎÎŽîß¿¯2½¸¸˜V¯^MNNNdbbBcÆŒ¡Ã‡wªÿ‰­kuÔþûêŠØ8”¯SSSiÁ‚deeE–––´hÑ"JKKµl1õOMbccÉ××—LLLÈßߟŽ?.¼÷ã?ÒÂ… ÉÒÒ’,--iáÂ…ôã?ŠšoJJ -Z´ˆ,--ÉÚÚš"""èÎ;d``Щ¬Øø»ŠU›uÓ´u½}5ÍOìví®íakk«2ÿ‚‚•e8p@(;PÛ ý¥»c±VÇâîŽÂ{ïYXq»‹1ÆX?ë«„¢uxGGGrvv¦½{÷RsssOBºkg\ºtIx¿´´”ˆÄŸûÖ¶þ+&6mæ©©lWõûöí +++Z°`¥¦¦öª=•MAAA]®+‘ø¶¢¦øµm võhÛ@YYYAÖÖÖ·‘¸vIo·‘ø¶¡˜í¡«k/êôä{Ó{b÷eMºkêj»‰]¢®¿o]´Ÿ5­‡˜}¶7íúžî;ƒMss3ýë_ÿ"'''rrr¢³gÏöÛ²7oÞLFFFæ¼mGÝíƒIII$‘H:M»ß‹-×ÛãF_ë»~ýÿ³wïqQÕ‰ÿÇ_ 7D. 
ˆˆ šÔ2IÓÊR\¿•º]ÔÊ]Ü6Këûk±¶5+-µvM­mó²]vµÔon e*i™–›—´LS Q.‚‚ ÈE®¿?fgS—÷³Ç<Ïœ™ybÎÜ>ïóÙWåîî^UXXXcùùóç«\]]«¾þúëZ×ù¥ûÖ+y¼Ô~äJ÷—[–ø%ûœ+Ùï^ÉßÄåÜ÷Õlë•|y%ÏeeeU!!!U—•ÃH¯¿þz•ƒƒCÕÌ™3ŽR'B¤%k …»ÿ^(""—á4§éHG¶±›¹Ùè8ÒØÙÙ±zõjÆß ·¿téRâââä¶Eš¢ììl¶mÛFRR~ø!ééétèЛo¾³ÙLll,þþþFÇåää°uëV’’’øè£HKKÃÇLJáÇc6›=z4FÇ1L\\ >|¸ÖÔéMÅÞ½{ˆŠŠjÛ·¾6_³fMƒÜ~uÍéõzII $''“‘‘Á©S§jœ?vìùùù¶õ]\\ 44”ÐÐPüýýmÿö÷÷'$$777·H¤nß}÷£F"%%Åè(Ò‚”––’]kß™œœlûwff&Ö¯kœ ¬µï¬¾_5™LoÕ50y2¤§CR’ÑIDD¤•¹Ü÷bWûþ377—ùóçóÊ+¯Ð¾}{âââxôÑGiß¾ýUå•Öã•W^¡¸¸˜'Ÿ|Òè( ÆÎÎŽæ0LIï E_~~>o¾ù& .$##ƒx€¹sçâããÓ¨9-ZÄc=Æ]wÝÅë¯¿Ž··w£Þ¿ˆ4-kÖ¬!11‘wÞyÇè(õÊËË#>>žeË–1kÖ,fÏžmt¤:5æ÷o"­¡¿¿®®¾ñ¦ ~Ï""-H)¥8ádp©‹ãÆcܸq€åûÄÄD’’’xä‘Gx衇èׯf³³Ṵ̀aÃptt48µ4¤ŠŠ öïßORRIII|úé§TUUÑ·o_î½÷^FÍ7Þˆ½½½ÑQEDš„yóæ±~ýzfÍšÅâÅ‹Ž#Mˆ‹‹‹mr}rssë,‹$''³cÇRRR(**²­o2™ê,‹XÏáà /¥áØÙÙñÊ+¯0iÒ$\]]9|ø0Ó¦Mãá‡6:š43¹¹¹õæ’““9qâ8::âããcÛ× 2¤Ö>0$$¤å¿G©ª‚­[¡™#EDD®„ÉdbÞ¼yÄÅűxñb.\ÈK/½Äرc¹çž{¸å–[ô¹´Ôiÿþý¼öÚkFÇhuôÞPÄ8eeelÚ´‰U«VñÁàèèÈ”)Sxä‘GèÒ¥‹!™¦OŸNxx8¿ûÝˆà™gžáw¿ûž»EZ;;;þóŸÿ0þ|–/_ntœ:•••ñÆoðÜsÏáììÌG}ÄÈ‘#Ž%"Ñ7ª""W ËÑN=ð08‰ˆˆ\ŽÈÈH"##‰§°°/¿ü’„„V¯^Íüùóqwwçæ›o&66–‘#Gdtd¹²²²Ø¾}; $&&’››‹ŸŸ111¬ZµŠ˜˜˜&{Ô{£µoßž 0eÊ&OžÜ(G1‘–Ãd2a2™ˆŒŒ¬wúM'''“””ÄÉ“')//¯q›õÍ2ÒjMKƒILLdáÂ…<ýôÓØÛÛÓ­[7yä&Ožlt4iB.UvKNN&55•²²2ÛúÕËn¡¡¡˜Íæû¯àà`Ú´icà5ß~ iipÛmF'i0¡¡¡¼òÊ+<÷Üs¬\¹’•+W‹··7±±±Œ=š[n¹www££JñÖ[o¡AÙÙÙÙ~6¥YBôÞP¤q°yófILL$''‡èèh.\ÈÝw߇‡ñãqn»í6<ÈsÏ=Çã?ÎÂ… yâ‰'˜4i...FÇ‘FÍÃ?L¿~ýŒŽRCqq1o¿ý6/¿ü2iiiL›6?ÿùÏxzzMD dWÕ”Þe‰ˆ4q;ÙI4Ѥ‘F FÇ‘ ¾)¼®•Ëö\¤5²:LJJâã?¦  À6XgôèÑÄÄÄè½f¢¼¼œ]»vÙfƒÙ·o... 
2Ä6Œ4‹ˆ\¾ªª*† ÀöíÛ NS[CO¹Û˜SVëõzݬ¥‘ú޶Ÿ’’Bee%NNNx{{ÛZ×W©Kqqq½EŒŒ RSS)((°­o2™êÕ(44”Î;눡—kÞ<øë_!3Tì‘Fv¹ïÅâýgJJ «W¯&!!]»váààÀСCùÕ¯~ň#èß¿¿Ê£""Ò¢TTT°wï^¶nÝÊ'Ÿ|ÂöíÛ©¨¨`ðàÁÄÆÆ2a‚ƒƒŽY¯ãÇó /ðÏþ///~÷»ß1eÊBBBŒŽ&"­ÌñãÇY¾|9Ë—/'??ŸI“&ñÔSO5é}huùý›Hckèﯫ«o¼©f¹¹äОö'‘_*44”¸¸8âââj –-[¦BAW½Ð㜘ÈÃÅÅôm׎þ½{ôÜsô›8çfòÁ‡ˆHScggÇ_þò®¿þz>øàÆŽkt$ieL&QQQõ¾þºpáéééu–E’’’lç­\\\ê¼m=ß­[·&qäA¹¶.µ¯HNNæÇ$//϶þÅûЍ¨¨ûŠððpÚµkgàµ07ZfQDDDZ™âãã‰';;›7òñÇóÊ+¯0sæL<==6l7Üpƒ bÀ€:Ò¯ˆˆ4+çÎc÷îÝ|õÕWìÚµ‹íÛ·sîÜ9üýý1b+V¬à¶ÛnÃÛÛÛ託¥K—.,[¶ŒçŸžW_}•+Vð /`6›ùÍo~Ø1cšÍ¶ˆHó“͆ xï½÷øä“OðóócêÔ©<üðÃtìØÑèx"Ò„¨""rrÉÅgÚÒÖè(""r 988Mtt4YYYlß¾„„æÏŸÏÌ™3éÒ¥ 111˜Ífbbbðòò28uëRTTÄ_|ARR6làðáø¹¹1|øpîž1ƒÈS§¸áäI8xþügËÉÛz÷†ÈHèÕë§Ÿ&“Ñ›#"Òä 8ñãÇ3sæLF¥£K“âììl;}JJJÈÈȨs–‘C‡qìØ1òóómëW^Wq$$$77·ÆØ<¹ ¥¥¥dgg_rvÌÌL¬¤;;;h{l›Íf&NœXãqoÒû„Æ“Ÿ_~ =dtCùøø0qâD&NœÀ¡C‡Øºu+Ÿ~ú)ûÛßxê©§°³³£{÷î 82hÐ úö틳³³ÁéEDD,ŸÁíß¿ßVÙ½{7G¥ªªŠÎ;3hÐ æÌ™Ãˆ#ˆˆˆ0:î/âççÇœ9s˜={6‰‰‰¼ùæ›üá ..ŽaÆqÇw0vìXÍV,"¿Xzz:|ðëÖ­ã³Ï>ÃÑÑ‘[o½•uëÖ1jÔ(4ì[DjÓžADä œå,&ôå°ˆHK×±cGÆǸq㨨¨`ÿþý¶Ù(î¹çªªªèÛ·/£G&66–~ýúa¯£š^sÉÉÉ$$$˜˜ÈçŸNYYýúõc̘1,^¼˜¡C‡âääTûй¹ðÝwpèÐO?׬3g,—›La)ˆDD@Tôí îî»""MÜ‹/¾HÏž=Y¾|9iÀ¦43...?[ÉÍÍ­wæ€;v’’BQQ‘m}“ÉTï,#¡¡¡é‹‘k$77·ÎB—õü‰'¨¨¨ÀÑÑÛcqÈ!µ£!!!zÏÖ”lÙ••ct‘&%""‚ˆˆ¦M›À©S§j °5kyyy8::Ò§OHïÞ½‰ˆˆ 22ƒ·@DDZ²ììl<ÈáÇùæ›oؽ{7ß|ó eee˜L&ȸqãl%F£#7ÆŽËØ±c)((à£>bݺuÄÇÇóÈdÏû› IDAT#pà 7ˈ#ˆŠŠ¢M›6FG‘&®¼¼œ={ö°mÛ6øÏþƒ››·ß~;«V­âöÛoÇ]cDägèJ‘+N:èdt iDmÚ´!**Ѝ¨(âããÉÉÉaëÖ­$%%±bÅ ž}öY:tèÀÍ7ߌÙlfôèÑ:òËUÊÎÎfÛ¶m$%%ñÑG‘––fûÝ.^¼øò·&DG[NÕY‹"{÷þTY¹ -—ûûÿT±þìß\]¯ýÆŠˆ4]ºtaêÔ©Ì™3‡x£#‰\S&“ “ÉDddd½ëÔ7(=99™¤¤¤ƒÒ­·Yß,#”.bq©2Vrr2©©©”••ÙÖ¯^Æ Ål6×x|kpEs³aÜpƒeVG©—¿¿?cÆŒa̘1TUUqìØ1[IdïÞ½¼ûî»äååàëëKdd¤­ ¢¢ˆˆˆ\êÅê?Ïü÷Àk^^^ôêÕ‹èèhüqHXXvvv'o|íÚµc„ L˜0’’6oÞ̺uëX´hO>ù$ :”#F0|øp®»î:}6("TVVràÀ¶mÛÆÖ­[Ù¾};øûû3räHž|òIbbbô½œˆ\BDD®@i*„ˆˆ´rÞÞÞ¶ÙC æ,>ú(S§N%""‚ØØXÌfsý³XHÙWøòË/±³³£oß¾L™2…ØØXú÷ïí>@®«(RY ÉÉpð ¥ òí·°m¼þ:”•ƒtë½zYJ"‘‘pÝuúÀVDZ'Ÿ|’eË–ñÖ[oñûßÿÞè8"Îd2ÙÊÁu)++ãÌ™3ulß»w/‰‰‰¤¤¤PYY €““ÞÞÞ¶ìõGDš«âââz‹¤¦¦RPP`[ßd2Õx \<»G—.]pUA»e¹pÖ¯‡Ù³N"""ÒìØÙÙNxx8÷Þ{¯myZZZA»ûöícåÊ•5Š"½zõ¢G„……ѵkWBCCéÚµ+mÛ¶5jsDDÄ@EEE$''óã?’œœÌ?üPgñÃZ.3fŒ­lhpú¦ÉÅÅ¥F‘óСClÛ¶mÛ¶ñ /ðøããííͰaÃ2d  
ÿþ:ê¿H+pþüyöíÛÇîݻٹs'Ÿ}ögÏž¥C‡ 6Œyóæ1bÄzôèatTiÆT¹©¤Ò—¾FÇ‘&$44”éÓ§3}útŠŠŠøâ‹/HJJbÆ ÌŸ?777ÌèÑ£;v,ÁÁÁFG6TVV›6m"11‘¤¤$rsséÒ¥ 111LŸ>[n¹OOÏÆ doo)w„…Áر?-//‡“'-%ël"kÖÀ‘#PQNN–ëDEýtêÛô¡­ˆ´0~~~Lž<™_|‘|P%G‘‹8::@@@@½¥‘ .žž^çŒIII¶óV...µ "ÕÏwëÖ ÆÚD›Ký-[‘XBí¿å¨¨¨Ëááá´k×ÎÀ-C|ü1äçÃwDDD¤ÅèÔ©:u"&&¦ÆòêE‘C‡ñÍ7ßðïÿ»Æûÿ‘êç}}}{SDDäÊÊʪQúøñÇmç/~. £G*~\CDDDððÃSYYÉ·ß~k+ˆ,X°€¬¬,Ú´iCÏž=8p dÀ€ôéÓGŸÃ‹4c¥¥¥ìß¿Ÿ={ö°{÷nvïÞÍ÷ßOEE~~~\ýõÌš5‹áÇӻwïV9»’ˆ4 BDD®@iŒf´Ñ1DD¤‰ruuÅl6c6›™7oÉÉÉ$%%‘””ĬY³˜1c¡¡¡¶un»í¶Ô—’’vìØaû=ìÛ·† B||*¼õç®]»HII¡´´wwwºvíJpp0ÁÁÁXã¼³³³›&"ÒêYÚ––ÆÉ“'IKK#==“'O’’’Brr2çÏŸ,3ÖvéÒ…ÐÐPúõëÇwÞi+†††jfÎF`ooOŸ>}èÓ§3fÌàäÉ“¶Áâ»wïæý÷ß'??'''Ûº‘‘‘ôêÕ‹^½záççgðVˆÈÅN:ÅÁƒmìýû÷óÍ7ßPZZЧ§'QQQÄÆÆòÜsÏ1pà@:wîltdiÁT¹L唓AAèËJ¹<¡¡¡ÄÅÅGyy9»ví²ÍŒ±lÙ2[1ÂZi’ň«P½óñÇSPP`+Â<óÌ3ÄÄÄàââbt̫Ӷ- `9YUUÁñã?•DöKáÄ Ëå>>?Dúõƒþý¡{wKDD¤ âî»ïfþüùLš4IG+i...?[ÉÍÍ­wf†;v’’BQQ‘m}“ÉTï,#¡¡¡áà ‡[‹ÜÜÜ: GÖó'Nœ ¢¢°Ì|ãããcû[2dH­¿¡ìõzV®TI $$Àœ9F'iÕ\]]mƒK/VYYIjjj¢ÈÉ“'ùúë¯IHH ##ÃVËÌ¢têÔ‰   :uêD```ó:ʹˆÈ•)--­³ìqâÄ ÒÓÓIOO'33Ó¶¾““S}ñ¨Q£jÌøÔ©S'½‡o‚‚‚‚ âÎ;ï,ÏÁGe÷îÝìÙ³‡o¿ý–>ø€ììl¼½½éÕ«‘‘‘ôîÝ›ˆˆzõêEûöíÜ ‘VáìÙ³ÍgŸ}FBB ,`æÌ™øùùCll,111xyyœúòòå—_’À† HIIÁÝÝ›o¾™—_~™‘#GÔ’þjg÷Ól"wÜñÓòÜÜŸ "ÀÖ­°x1”•»»¥$e9õï=z@›6Æm‡ˆÈ%<ñÄôêÕ‹O>ù³Ùlt‘VÉd2a2™ˆŒŒ¬wúý[ »Õý[o³¾YF4è¿ù¸TY(99™ÔÔTÊÊÊlëW/ Y‹ÛÕÿÿÓF¯K¥!|ôœ?ÿì""""M½½½mFáÇ׹N}¯?>Ì'Ÿ|R«¬náÐßß“Éd;ñ²Î;ãèèØX›*"Òè¬ûÏÜÜ\N:eÛ^¼,++‹ÊÊJàòÚ ÷ñ-ƒ½½==zô GLœ8Ѷ<77—ï¾ûŽC‡Ù~®]»–3gÎ5?ß«ë$"—ÇúÙz}'ºuëFDD·ÝvDFFê±&"M‚ !""—éGèF7ƒ“ˆˆHKàëë˸qã7nìß¿ß6«Æ½÷ÞKee%}ûöÅl63zôhn¼ñÆ&5ï»ï¾³Ív²}ûvÊËËéׯ&LÀl63lØ0}yg2ÁÍ7[NVååpäìÝk9íÙc™M¤¸œœ ,ì§’HT dY."b°ÈÈH¢££ùÛßþ¦BˆHf2™ˆŠŠªwæ¹²²2Μ9Sgq`ïÞ½$&&’’’btàää„···m A}Åi8ÅÅÅõ=222HMM¥  À¶¾Édªñÿèâ"]ºtÁÕÕÕÀ-’Ví½÷ :´ßiÖ~®¬^YYIff&©©©¶ÁͧOŸ&++‹S§Nñí·ß²yóf²²²(..¶]ÏÎÎŽ:àë닟Ÿ~~~¶óÞÞÞ5N>>>x{{7©ÏÌE¤u©¨¨ ''§Ö);;›¬¬,NŸ>Mff&™™™œ>}š3gÎPUUe»~Û¶mkìë:uêDTT¾¾¾¶Â\çÎñóóÓ¾®•3™L5:huòäI:ıcÇøá‡øá‡ø÷¿ÿMJJŠm6/“ÉDXX˜íBçÎéܹ3ÁÁÁ´mÛÖˆM1DQQ'Nœ 55•ÔÔTRRRøñÇmŸÜÜ\Àò™xHHaaaDDD0fÌÂÃɈˆ sçÎo…ˆHýT¹LG9JxàatiaÚ´ic¸ONN[·n%))‰U«V1þ||||>|8f³™Q£Fب³³³Ù¶mIII|øá‡¤§§ãëë˰aÃX²d ±±±øûû7j¦fÉÁ"#-§I“,ËÊÊààAKAdß>Ëϵk¡¤\]¡OŸš‘=@þ‹ˆ~øaî»ï>Nœ8App°ÑqDä*8::@@@@½¥‘ .žž^ç“’’lç­¬Gû­¯,Ò­[7<<ôYJ].õ»NNNæ¨çQ 
À›À™Ú¿ë¨¨¨¿ëððpÚµkgôf‰Ô-'6l€×_7:‰ˆˆˆ40{{{ÛûŽŸSPP@FFgΜ©1púÔ©SdeeqìØ1233ÉÉÉáüùóµ®ß¾}{[9¤®ÒÈÅ—yyyið«ˆÔR\\L^^^­bGvvv½¥³gÏÖºwww¼½½m%Œ¯¯/þþþtìØ‘:ˆ»»»[*-IPPAAAŒ9²ÆòŠŠ Nœ8Á?üPc°ûûï¿Ï‰'(,,´­ëããS£ b=oýwÇŽu@iÊÊÊÈÊʪQø°–>¬çsrrlë»»»LXX7Ýt<ð€­8¤Y—D¤YR!DDä2å(á„CDDZoooÛì!ÉÉÉ$$$˜˜È£>ÊÔ©S‰ˆˆ 66³ÙÌСCqºÆ³HTŸµ$!!/¿ü{{{úôéÃoû[bccéß¿?vvv×ô~[%GGè×Ïr²*+ƒC‡~*‰ìÙ+VXfñð°”C®¿ÞR4¹ $"­ÓwÜA‡X¾|9Ï?ÿ¼ÑqD¤8;;Ûf©OII µ ¶#æççÛÖ¯^d¨«8‚››[cl^£¹Ôl,ÖgffÚŽêììL`` íwc6›é9¼'ÿñoÊæ•ñë²_3Ýi:Cbð–‰\¥þÓòÞç®»ŒN""""MH»víèÞ½;Ý»w¿¬õsssÉÈÈ 77·ÖÉ:‰õϹ¹¹œ>}šŠŠŠZ·ãââb›éäR§¶mÛÖZ·cÇŽ$(ÒÄר”””ÔZv©SIII­Û¼ø±ÀÀëÜWˆ³³³[/RS›6m.ùÙ^õÙh«fuøða>üðCNž}àÁ-ËÊËáÈKIdï^ز,€ÊJð÷ÿi‘¨(2Ú·7vD¤Åqrrâàí·ßæÙgŸÕ—;"­˜‹‹ËÏ–F¬µê*DìØ±ƒ””ŠŠŠlë[¿¤«k–‘ÐÐP‚‚‚pph:gçææÖYˆ±ž?qâ„íKHGGG|||lÛ2dÈZÛRç~µ„H`‘Ó"¢‰¦=¸Ÿû‰#¦ÆÞl‘«÷Ö[ð›ß€f±‘_À:øúr•——ÛŽèöìYòòòÈËËãܹs5~æåå‘™™É÷ß_ã² .ÔºM{{{<==1™L¸¹¹áææ†»»;^^^¸ººâææ†‡‡¸¹¹áêꊗ—îîî¶õ­×uuuÕ,Ò*PXXHQQ¹¹¹RXXÈùóçÉËË£¨¨ˆÂÂBòóóÉÏÏ·]~îÜ9 l—çææ’——g;ØBuÎÎÎxyyáééYãg@@={ö¬ó²ê3 ©ø%-UÛ¶m/ù¹^yy9§NâäÉ“dffrêÔ)NŸ>MFFYYY|ñŶeeee¶ë999áëëk{ uèСμªÏä¥ï[—sçÎÙf_:{öl309sÆvþôéÓ”––Ú®oý³Î¸Ô±cGúõ뇯¯/tìØ‘   ´‘V©é|ƒ&"Ò„UPÁ0…)FG‘VÎÕÕ³ÙŒÙlfÞ¼y$''“””DRR³fÍbÆŒ¶‡ÙlfäÈ‘õ~¡T\\ÌÎ;m×ß·omÛ¶åÆodæÌ™˜Íf¢¢¢y ¥^i9MšdYVP`™=ä?ÿ¯¾‚åËáÙgÁÞzôøi‘Aƒàºë,E‘_`âĉ¼øâ‹|þùç 6Ìè8"Ò„YjEFFÖ»N}¥ ëkÜê¥ ëmÖ7ËÈ¥JWêRe–äädRSSk|á]½Ìb}-^=_ppðU é‚ ãþûß^ö²”¥<ÏóÌe.ws7ñ}éû‹·Y¤A}õ8ÿ»ÑIDDD¤•qpp° ¼ÅÅŵÊ#ÖŸ¹¹¹¶Aéœ;w޳gÏ’ššZç öKñôôÄÍÍ <<}šœœÛ ÿï¾ûÎVÊÌÉÉ©U´tppÀÛÛ///<<Ⱥ¬]»v899áééYãù¤z¡Äú\h½=À¶ï‚ÚÏcðó…˺žë.vñsÂŬ¯ª«¾·¾†€šÏÖß™µÐ?='X_gØž÷¬¯+òóók,«¬¬¬7›Édª³ÀZ£ÜS}f¼½½m¯-DDäÚP!DDä2|Í×8áDFG©—³³3¿úÕ¯øÕ¯~Å‚ ÈÈȰ•CæÌ™Ãão>@ûôöÄÄÄðÚk¯që­·`pr¹¦üý!6Ör¨¬„ᅦ/¿„;aófX²ÄrY0x0 7ÞÝ»C=_¶‰ˆXÝ{ï½Ì™3‡%K–¨D(" ÊÑÑ‘àà`‚ƒƒë]§¸¸¸F™£ËÛop躸°wï^ÛÑyëâááa+w„……1|øp[ÑÃZ¹ÒCÁOâþûß^ö²ˆELcñÄ3žñÌ`=éitL‹óçaõj˜5Ëè$""""†j×®]ƒv·Ü´Î;GEE…­PWyÀ:€´zy zÂ:´  €Ó§O؆^<8õâBC]%¹2—c.ˆl„loo§§guìíí j–b¬%ꜭƒ¡«Zöòò¢M›6xzzÚÖ­^ ¹îî»_ò³¾K©^ÉÏÏçüùóPZZʹsçl%‰üü|ÛŒFÖçµôôôZËà§R\õÒDKaÝçW/ƒV/Í899áááAûöíqqq±ü<<zõUÛ:ùùù¶YF[é£%|iEïð XÀÛ¼Íë¼Îr–3‚ÄÇÿð?8èk1Ò[oAYLžlt‘Í:›‰õèæMÑÅG@¯~ts¨{VŒê~î(åu©ïˆò .à±Ç«uYõ³—«z)£.W3[ŠˆˆÔÔX%„ê¥Çêåk鲺KÍ\rñõës9³Y]üün™iÆŒìØ±ƒ­[·ü³3–ˆˆHË¥o‚DD.ÃNv2˜ÁFǹjmÚ´¡}ûö–ó¨ 
Òêyx€Ùl9e€Ö×_Ã_XN˗óς£#ôëg)†  7Ý>>ÆfÃpýõד Bˆˆ4-éé°?¼øbÅDFFiP°†ç‡ñÄóO°•­,e)ws7¾ø2‰I<ÌÃt¦³Ñ1¥µ©ª‚×^ƒ‰ÁÛÛè4""""b°¦TxX»v-ãÆ38‰ˆˆ45NNN¶RES.Z¬^½š›nº‰É“'óÅ_¨ ""ÒŠÙÿü*""­[1ÅìcCbt‘†áèh™dÆ X³ÒÒàÄ ËÑ| ‚Ï>ƒqãÀ×z÷†Gµk!+Ëèä"bQ£F‘˜˜xÅGeiPë׃›Ü|³ÑI c=f̬a G8Â$&ñþAºK,I$QE•Ñ1¥µøè#øþ{xøa£“ˆˆˆˆˆˆˆˆ´8mÛ¶åý÷ßçÔ©SLœ8‘ª*}î'"ÒZ©""ò3v³›RJU‘Ö%(î¹–,}û /6o†Q£,³‰ÜwøùA×®0i,]j)‘ˆH«0zôhNŸ>ÍÞ½{Ž""ò“„9œNÒ$t¥+ó˜G*©¼Ë»”PB 1t§;ó™O9FG”–nñbˬ„½{DDDDDDDD¤E fݺu$&&2oÞ<£ãˆˆˆATù;ÙI„bt㸻[sÍ›;vÀÙ³°e Lœ§NÁ£BHHÍ‚ÈñãF§‘rÝu×̇~ht‹óçáÓO!6Öè$MŽ3ÎŒc[ØÂ!q·1‡9ÈxÆ“D’Ñ¥%úúkËû…Ç7:‰ˆˆˆˆˆˆˆH‹ÍK/½ÄÓO?Mbb¢ÑqDDÄ*„ˆˆüŒÏùœh¢Ž!"""Ò´¸¹Y "³g[zåäXf¹çHI±DBC-§)SàÝwáôi£S‹È54räH>þøc£cˆˆXlÞ eepûíF'iÒzÒ“E,"ƒ ³˜£%†0€¥,¥B£#JKñâ‹pÝu–Y{DDDDDDDD¤AMŸ>x€{C‡GDD™ !""—PJ)ÛÙŽ³ÑQDDDDš677ˆ‰çŸ‡íÛ!7¶mƒÉ“áØ1ËO??è×þøGË Íâb£S‹È/0lØ0öíÛGa¡‹HƒƒÑIš…v´#Ž8ö³Ÿ=ì!Š(f0ƒ˜ÊTrÐèˆÒœýø#¬[O=vvF§i^}õUzöìÉwÜÁ¹s猎#""H…‘KØÉN )T!DDDDäJµm 7ß ÏÿÜ2èsýzˆ‰???ÞyòòŒN+ÒªôîÝ{{{8`ti­6l//2Äè$­F8áÌcé¤ó/þE:éÄCOz2Ÿùä’ktD1ÂêÕðÙg°h‘e@1”££#«W¯¦¼¼œûî»ÊÊJ£#‰ˆHQ!DD¤Åó Ÿ‹Ž.)"""Ò¤88@t4Ì›‡ÃÁƒ–YC²²`ÊKydÄxùeøá£ÓŠ´xnnntíÚU…1NBÜ~;8:¤ÕqÁ…qŒc';ÙÆ2”çyž`‚™ÊT ç†V£¨âãáþûáúëN#""""""""ÿÕ±cGÖ­[ǧŸ~ÊìÙ³Ž#"" D…‘:lb%”0ŠQFG‘K‰Œ´B>û NŸ†U« [7øË_,?##aöl8tÈè¤"-Öu×]Ç7ß|ct iÎûiæ01TQ¼Á¤“Î˼ÌNvÒ—¾ `KYJ1ÅFG”†4k–åñøâ‹F'‘‹ 8E‹1wî\>úè#£ãˆˆHP!DD¤ïñCŠ~FG‘Ëe2Á¸qðÆžn j6òe–bHd$Ìœ ;vT¤E‰ŒŒäðáÃFÇ‘ÖhãFËÏ[o56‡ØxâIqä Ÿó9¡„2iÀt¦“L²ÑåZûòKxåøë_Á××è4""""""""R‡©S§2iÒ$î»ï>Ž?nt¹ÆT¹HE$’È&EDDDD®–½=DGâEšúS9ä_ÿ‚›n‚ÐP˜>ÝR©ª2:­H³Ö¥KRRR¨ÒcID[B‚åyÝd2:‰Ô!šhÖ°†“œd&3YÏzºÑbXËZÊ)7:¢üREE0y2Œ÷ßot¹„×_î¸ãŠ‹5£¯ˆHK¢BˆˆÈEÖ³ž \àî0:Šˆˆˆˆ\ ÕË!iipð Lš}¤rˆÈ5JII ™™™FG‘Ö¤¼Ü2CHl¬ÑIägøáG<ñ$“Ì&6aÂÄÝÜM0ÁÌd&©¤Q®Ö£Bv6¼ù&ØÙFDDDDDDDD.ÁÅÅ…5kÖpüøq{ì1£ãˆˆÈ5¤BˆˆÈEV³šŒÀ_££ˆˆˆˆHCˆŒ„Ù³áèQص îºë§#Œwí O=‡R¤ÙèÒ¥ €¦‘Æõùç› £GD.“=ö˜1³†5|Ï÷Ld"+XAºK,I$Q…Ê¹ÍÆÊ•ðÀŠht¹ aaa¼óÎ;,]º”·ÞzËè8""r¨""RM6Ùld#÷pÑQDDDD¤¡ÙÙÁõ×ÃK/Ar2ìÙ&À¿þ–Ë^{ Ξ5:©H“ˆ³³³ !"Ò¸,%ϰ0£“ÈU#ŒyÌ#4Þå]J(!†zЃùÌ'‡£#Ê¥8qqðÄð?ÿct¹cÆŒáÿ÷y衇øú믎#""×€ !""Õ¬d%N8q'wEDDDD[T¼ø"¤¤XŽ:Þ·/<ù$øûCl,¬] ¥¥F§irìíí ääÉ“FG‘Ö$1Ñòü,Íš3ÎŒc[ØÂ!1’‘ÌaèÄxƳ“FG”‹¥¥Á¨Qpà 0w®ÑiDDDDDDDDä*¼øâ‹ 4ˆñãÇsîÜ9£ãˆˆÈ/¤BˆˆH5oñã;îFG£ØÛCt4¼ñdeYf ¸ûnðóƒ©SaÇc3Š41>>>ääèhî"ÒH†cÇTiazÒ“E,"ƒ 
±ˆ#!šh0€¥,¥B£#J~>Œ žžðÿF'‘«àààÀš5k(**bÒ¤ITUUIDD~BDDþë[¾e?û¹ŸûŽ"""""MEÛ¶0n$$À‰Ÿ~ 7Ý0{¶e¹H+çããCvv¶Ñ1D¤µØ°:t€ë¯7:‰4€v´#Ž8p€=ì!Š(f0ƒ@™ÊTrÐ舭SAÜ~;œ97‚Édt"ù:vìÈÚµkÙ¸q#ýë_Ž#""¿€ !""ÿµ”¥„F4ÑFG‘¦(0ÐR9rvî„¡CañbèÚÕr¤äÄD¨¬4:¥ˆ!¼½½5Cˆˆ4ž„5 Ú´1:‰4°(¢xƒ7H!…'y’-l¡7½ÀÞáÊ(3:bëPXh™‘çØ1ؼ‚‚ŒN$""""""""×À7ÞÈœ9s˜9s&Û·o7:Žˆˆ\%BDD€ x‡wxˆ‡°ÃÎè8""""ÒÔÝx#üýï‘«VAI Œ¡¡ð •etB‘Fåãã£Bˆˆ4ŽœصË28]Z _|‰'žø-l!”P¦0… ‚˜ÉLN ÛLv6˜Í–Rô§ŸBd¤Ñ‰DDDDDDDDäzâ‰'øõ¯ÍøñãÉÈÈ0:Žˆˆ\BDD€7y“rʹŸûŽ"""""͉‹ ŒIIðý÷pçð—¿XŽšü›ß€Ž¤#­Dûöí9{ö¬Ñ1D¤5HLˆ‰1:‰À{̘YÃRHa3XÉJB %†Ö²– *ŒŽÙr;ƒ[ÊΟ~ ={HDDDDDDDD®1;;;Þ|óML&÷ÜsåååFG‘+¤Bˆˆ´zUTñ7þÆ$&ÑžöFÇ‘æ*<ÜRIKƒ¥K!%† ƒ¨(ø×¿ ´Ôè„" ÆÅÅ…’’£cˆHk#F@»vF'ƒH<ñüȼÇ{L`ÝéÎ|æs†3'læ¶m³ÌŠg2Á—_B÷îF'‘Ò®];Ö¬YÃîÝ»ùÓŸþdt¹B*„ˆH«÷1s„#ü?EDD¤Á}rÃ'|øá‡u^vàÀ:uêd;âÇgŸ}Æ Aƒpqq!$$„+VÔX¿  €?þñtëÖ WWW<==‰‰‰!11±Á·C¤IkÛ&O†]»`ï^ˆŒ„´Ì2{6dg7Ø]ÛÙÙQQQÁœ9s ÁÙÙ™ððp^}õÕZë&$$pà 7àææ†››7ÜpC½û‘ŸãììÌ… ŒŽ!"-Ý… °y3ÄÆÚé¹OœpbãØÂs˜»¸‹, 3Ïx’Hj°û¾ÔßP³}UU À-·Àðá–bHÇŽ›ADDDDDDDD]ïÞ½Yºt)/½ôï¿ÿ¾ÑqDDä ¨""­Þ–ð+~EozEDD¤Áu›ÞW^y¥ÎË–,YÂïÿ{8räwÝu=ö§OŸfÍš5Ì;—O?ýÔ¶þäÉ“)//'))‰sçÎqüøq¦OŸÎ’%KikDšþýáwàÇ-%‘Å‹!8~Ø2ƒHøÃþ@qq1IIIœ={–eË–±páBV¯^m[g×®]Üÿý<ú裤¤¤püøq¦M›ÆÄ‰Ù½{wƒä’–ÍÉÉI…ixŸ~ pûí5ë¹O¬ºÓyÌ#t–²”øbˆ ‚E,â<ç¯éýMŸ>½e½¿ÊΆ;î€?ý æÏ‡Õ«ÁÍ­ñî_DDDDDDDD uï½÷òÛßþ–)S¦ðã?GDD.“ !"ÒªýÀlbðˆÑQDDDE§»:qøða¾ûî»Ësrrxÿý÷‰‹‹`îܹ̜9“»ï¾ ÄÂ… y饗l×Ù²e ³gÏ&88GGGÚ·oÏèÑ£Ù´iS£n“H³Ð¹³eP]jªåˆË7B·n0i:tMïªcÇŽÌ;—°°0ÜÜÜ6lK–,aáÂ…¶uæÏŸÏóÏ?Ï=÷ÜC‡ðõõå¾ûîã¹çžcÞ¼y×4´š!DDEBôëg)WV£ç>¹˜ .LbûØÇöp7ñO@S™Ê\“û¹ë®»ZÎû«?†ë®ƒ}û`ëVxüq°³kœû‘&cñâÅ„††ò›ßü†ÒÒR£ãˆˆÈeP!DDZµWy•Îtf4£Ž"""Ò(ìíùÃþPë(¶Ë—/'66___¾øâ ÆŒSc¡C‡ràÀO§"""øÿïÿ‘žžÞðÁEZ 77Ëì GÂ?þ{÷BïÞ–#1ïÙsMîâþûﯵlðàÁ=zÔöï}ûö½1cưwïÞk’CZBD¤Q|ø!Ôñü¥ç>¹”(¢xƒ7È ƒ—y™ì¤/}À–²”bНú¶›ÿû«sçà÷¿·Ì¼3|887ÝÔ¸DDDDDDDD¤ÉpqqaÍš5=z”øøx£ãˆˆÈeP!DDZ­óœç-ÞbÓhC£ãˆˆˆ4š¸¸8Ö­[Gvv6üíocÚ´i¶uRSS ÇÎÎÎvjß¾=§N²­óÞ{ïqæÌÂÂÂèÙ³'<ðëׯ§ªªªÑ·I¤Ùqp€‰áàAøàÈÈ€!&¾þúÝtHHH­e&“‰ÜÜ\Û¿³²²l«ëر#™™™¿èþ¥ujÓ¦ FÇ‘–lÿ~HI©³¢ç>¹žxG9Èç|N(¡Lc0é$“|U·Û¬ß_­_‘‘°n¼û.¬\ ^^ w""""""""Ò,„……±lÙ2-ZÄ|`tù*„ˆH«µœå”Qƃ}ºÖòÓ§OãççwU÷+""Ò Àߢ¢j]¤ç>¹RÑD³†5œà3™É|@7ºC kYK9å—}[ÍòýÕñã–êÆŽ…#àða˜0áÚ߈ˆˆˆˆˆˆˆ4[ãÇçÁäÁ$%%Åè8""r *„ˆH«TB /ó2ñíiot‘F7}út^ýuÊÊÊX²d 
<òHˇÎúõë/ë¶œéÓ§qqqlܸ‘Õ«W7Dd‘–oÔ(Ø»×rtæ}û "ââ Ú‘£¯•þýû“Pkù† èß¿ÿ5¿?‘_,!ÆŒ±”)¯‚žû¤.þøO<Ç9Î&6á‚ ˜@0ÁÌd&i¤]Öí4›÷W……ðôÓ–×™‡ÃæÍðÎ;àí}íîCDDDDDDDDZŒW_}•   &L˜@ii©ÑqDD¤*„ˆH«´ŒeäÃã‘âÇÒúö…ß~ƒO>1„Ô¯o¾ƒ³éÞ#æ”»»;³gÏ&44”š5kR³fMBCC™3gÿú׿òàDDDòЪUP¦ ¼ðÂCoB÷INÕ¥.A‘H" YÈE.â7OðÁs Ù>¯Ðþûê—_ ukø¿ÿ3ÿz耭í£mWDDDDDDDDJ„Æ3eÊ&Mš”mf1ž•ÑDD Zœæ4£mtCyyyQºti˜íúfÍš±fÍš{>ßÓÓOOÏ|J'"ØØ€¿?tí ãÆAŸ>0g|ñ<ñD–á¦û‹Ü½®K—.téÒ%¯‹ˆˆä½¨(s';»,«ôwŸä[léö÷ׯüÊ—|É'|B øàÃp†ãŽ{ÆøB÷ï«cÇàý÷á믡eKs—§ŸÎ»í‹ˆˆˆˆˆˆˆH‰Ñ¿6oÞLÿþýÙ³gÎÎÎFG‘;¨Cˆˆ”(·¸Å$&1€8£7¦""Rr]ºt‰àà`zõê¥ÿ¬) œÍÝAv쀫W¡Y3øøcHI1:™ˆˆHþJJ‚ ÀÇÇè$R‚5¤!a„q‚„F<ñ´¤%ÍiN8᜼t²ðüûêüy5 40¿w\¼6oV1ˆˆˆˆˆˆˆˆˆ<’3fP±bEÞzë-nß¾mt¹ƒ BD¤D™ÃNp‚wy×è("""†±°°ÀÙÙ™}ûöbtÉ瞃Ÿ~‚)S`Ò$psƒ;N%""’¢£áæMèÐÁè$"”£¾øG»Ø…nøYøáâì¼}óèÒϸp7n@p0Ô­ óæÁGÁн;XX—KDDDDDDDDŠ–-[ÆÎ; 2:ŽˆˆÜA!"RbÜâAÑŸþÔ¤¦ÑqDDD c2™¸~ý:ß~û-åÊ•3:ŽˆäV©Ràë qqP­´l cƨ[ˆˆˆOQQðì³ðøãF'ÉÄ 7f2“Ó¦Ó|zýS,¾µàùrÏÓŠVDÉ-nLÛ·!276wóóƒ#G lm &ƒˆˆˆˆˆˆˆˆ”Mš4a„ |øá‡ìرÃè8""ò7„ˆH‰±…$’HFGytµkúuðÕWðÅàî‡JDD$ï˜LðÃàãct‘{zŒÇ €ßùh¢q‰7xƒÔ`,c9ƱüÙqz!H“&ðæ›Ð®¹$(Ê—ÏŸ}ŠˆˆˆˆˆˆˆH‰7bÄ^yåÞ|óM._¾ltA!"RB$‘D ô¥/®¸GDDDD$oXX@¿~°?ØØ@³fft*‘¼ñóÏpâtêdt‘*E)¼ðb)KI ?ü˜ÃjSo¼‰$’4Ò}Gii°h‘¹äÿþš6…ƒaÆ xì±Gß¾ˆˆˆˆˆˆˆˆÈ}XXX0{öln޼ɀŒŽ#""¨ DDJˆPB¹À 4:ŠˆˆˆˆHÞsu…M›àw`ÄèÛ’“N%""òh¢¢ fMó…ï"Eˆ3ÎÈŸüÉ7|@zðOL0ç8—û^¹b.úhÔzõ‚gž`ñb¨W/@DDDDDDDDäÞ™;w.Ë–-cþüùFÇ)ñT""ÅÞ9ÎL0£M5ªGDDDD$ØØ@HˆùâÙo¿…—^‚3gŒN%""òð¢¢ÔDŠ4lèF7¢‰æW~å5^c“pÁ…ît'†˜o$. =Ú´_…  aÃü?‘l´mÛ–#F0xð`âããŽ#"R¢© DDнø{ìÅ(££ˆˆˆˆˆä¿àçŸáÜ9psƒ_~1:‘ˆˆHî?ûöÑIDòDD'8A8áüÎïxãM#F׸öÏàƒ!0ÐÜ ¤Y3ذÞ{Ïüç"<\ADDDDDDDD¤P ¢qãÆ¼ù曤¤¤GD¤ÄRAˆˆk‡9ÌLf2žñØcot‘‚Q¿>lÛuë‚§'lÝjt"‘ÜY¹Ê–5wC)FJSš^ôâ~a»ð0µâ?¦q8ÝrÄoc}öµ}ž|fφ—_†ØXˆ‡€¨\Ùèø"""""""""¬­­ùúë¯9|ø0ï¿ÿ¾ÑqDDJ,+£ˆˆä§±Œ¥.uéMo££ˆˆˆˆˆ¬Ê•aÝ:xã h×Î|aí‹/JDD$g¢¢à•WÀÆÆè$"ïöm¸|.]‚+WÌßÇŽ™;}$$à¶?3`’eKÞ²&lôIÂ×_ÇíúøÚ §§E/ÊPÆè£¹§ºuëÊ€ðööÆËËËèH""%Ž BD¤ØÚÁV°‚5¬ÁJ?îDDDD¤$²±%K o_hß"#ÁÇÇèT"""÷wílÞ ³fDJ²””ŒÂ þú .^4vÜùëÕ«æóõÖ­Š?nÞ4\»–u»66P½:Ô¬ nnЯåŸz ߦMPÚ– l Ü>œ! 
å}> /}ñÃZÔ*ÐÃÉ©~ýú±víZúôéþ}û¨T©’Ñ‘DDJ]!-"Å’ ïò.žxò ¯GDDDDÄ8––0gŽù÷ݻÆ àînl&‘ûY»RSÍBD ¡C°gÄÅÁ¾}pàœ8a.ò°¶†Š¡Bówúïk׆2e tió¸ ÀÂÂ\ôQ®”/o^V¾ü?«V…R¥²axýýuŠSÌg>3˜A!¼È‹øâË«¼ª›ßˆˆˆˆˆˆˆˆH¡ÎSO=Å€X¾|¹ÑqDDJ}j "ÅÒ"±“ìf·ÑQDDDDDŒ—^rù2¼ú*ìÜ µt—i)¤¢¢ eK¨RÅè$R\ÅÅÁưe lÛgΘ‹>žxš6…!CÌÅ®®æNŽŽ±Õ €ÑŒæG~$Œ0zЃjT£'=Â\p)ð\"""""""""Ù©P¡´mÛ– гgO£#‰ˆ”*‘bç WÃüð£ÍŒŽ#""""R8XZÂâÅàé :Àöíæ;V‹ˆˆ&ii°f DŠ“ädØ´ V®„U«àøq¨\Ù\x4f ´jO?mîìQÈ”¢TF×ßøˆ¿¿&3™ÎtÆ_^â%,°0:ªˆˆˆˆˆˆˆˆ”p^^^øûû3tèP<<áÉáFÇ‘"(99™nݺñøãEDDD$oÙÙÁòåðÜsЧ|÷XèâA)Dvì€sçÀÇÇè$RÔ¥¤Àúõ°d‰¹äÊsÑGïÞЩ¸¹¹÷Aõ¨GA|ÄG¬d%á„ã7 h@_ú2€T¢’Ñ1EDDDDDDD¤ âǤgÏžlÚ´ KKK£#‰ˆ{TrâÄ ÜÝÝqssË«<""d7»Ì`æ0‡ž½ÕvNÎîÝ»9qâ„ BDDD¤xª^–.5w ùòK4ÈèD"""ÿˆŠ‚ºu¡A£“HQ”š ?þh.ùî;¸t Z´€?†W_5¿*l±¥Ûß_¿ð 3™É'|B t£#ÁÓ#@Ý EDò])£ˆˆä•ÛÜæÞÁwÞâ-£ãˆˆˆˆˆ^­ZÁ¸q0r$ìßot‘¬\ ]ºBŠ’Û·aóf<œœàå—aß>øÏ !¶mƒaÊM1ÈÝžáf2“œ Œ0ö²—gx†æ4'œpnpÃèˆ""""""""RÂ4nܘñãÇóþûïóóÏ?GD¤ØSAˆˆ3˜Áöð%_b…ÑqDDDDD ·?„fÍ wo¸uËè4"""pä:>>F'‘¢à×_!0êÕ3w>K/ ‰‡Ÿ†wß…5ŒNY`ÊQ_|ÙÇ>v± 7ÜðÇ'œðÃ4:¢ˆˆˆˆˆˆˆˆ” £FÂÃÃ޽{“””dt‘bM!"R,üÅ_¼ÏûŒbh,SL^ IDATdt‘ÂÏÊ ,0_xbtøþ{¨T ÜÝN"…ÕéÓ0y2<ó 4jsæ@pð ù;0ê×7:¥áÜpc&3I qŒc=ëy’'iE+"‰ä*‘üUªT)æÍ›ÇéÓ§;v¬ÑqDDŠ5„ˆH±0‚”£ïñžÑQDDDDDŠŽºuÍBÆ7ße[DDÄHQQо½¹hQ$ݰp!´k..ðÑGæ‚M›àèQ˜8Ñ\"Y<ÆcÀŽM4N8ñ:¯S“šŒe,Ç9ntD)Æ\\\˜:u*S§N%&&Æè8""Å– BD¤È[Ïz³˜iLÃ{£ãˆˆˆˆˆ-£F™/¢4L&£ÓˆˆHIuélÛ>>F'‘Âb÷nð÷‡êÕ¡OHM…Ù³áÄ ˜5 Ú´Rúˆ#'JQ /¼XÊRŽq _|™ÃjQ o¼‰" z(""""""""yïÍ7ߤ[·nôéÓ‡‹/GD¤XÒ§%’Á £cI…eî KŽ‚t…+ `Ý膺`@DDDD$׬¬à«¯ 6/6:ˆˆ”TkÖ˜mÛÖØb¬ `Êxê)hÞbbà½÷àÔ)ˆŽ†^½À^7„yÎ8H ò'ßð éL}êL0ç8gpB)n¦OŸNZZ£F2:ŠˆH±¤‚‘(‰…EÅ(FqƒLcšÑQDDDDDŠ.77èÛÆŒë×N#""%QT”¹ãC… F'‘‚vû¶¹0ÕÏÏÜ dìXhÐÀ\ràŒ ŽŽF§,vl°¡݈&š_ù•×x`‚qÁ…ît'†£#ŠˆˆˆˆˆˆˆH1Q¥JÂÃÙ3gß}÷ÑqDDŠ„ˆä“É„Éd2:F¡ÉQP6°"ø‚/¨JU£ãˆˆˆˆˆm&ÀµkðÙgF'‘’æÖ-X»|ÔýµD9y‚ƒ¡^=ðð€Ý»aòdøë/Xº¼¼@7é) h@Aœä$á„ó¿á7hDa\ãšÑEDDDDDDD¤ˆóññ¡W¯^ 83gÎGD¤XQAˆˆIW¸B?úÑnt¥«ÑqDDDDDоªUaÜ8 Ó§N#""%É–-pé’ BJ‚”s7˜îÝ¡fMsAˆ—ìÝ »v¯/”-ktÊ«4¥éE/ö°‡]ìÂþÃpÆ?üØÇ>£#ŠˆˆˆˆˆˆˆH6eÊJ—.¿¿¿ÑQDDŠ„ÈCIII!88˜fÍšagg‡Íš5#$$„[·nekaa……iii|òÉ'¸ººbkkKÆ ™3gN–m9r„N:áàà@•*U8p 7nÜÈØNNÄÆÆÒ³gO\\\°¶¶¦råÊ´mÛ–õë×g›ššJXXÍ›7ÇÁÁ;;;<==‰ŠŠÊÈ÷±Ü%»lù9G÷’]޼Þö7ðóó£J•*”-[–W_}•„„„lÇ&%%1tèP{ì1¬¬¬jnîu>”ŸUžãçŽ3•©9Þ/ÀÂ… ñôô¤B… ØÚÚR·n]ÆŒÃåË—3Ƥ¦¦2cÆ 
Z´hAÕªU±±±ÁÉɉ®]»“ã1"""""EŽ¿?T¬h¾8SDD¤ DEA“&P«–ÑI$¿ÄÇÃØ±P½:té/¢Eæ"Ô™3á©§ŒN(wqÙÌä'!„XbyЧhNs '‰$£#ŠˆˆˆˆˆˆˆHS¾|y"""X²d K—.5:ŽˆH±aõà!"™%''Ó¶m[¶lÙ’iy\\qqqüðì[·kkëLë}}}™={vÆãC‡ѯ_?èÖ­çÎÃÃÃS§NpýúufΜÉÙ³gs•ÑÃÃ#Óã .MLL ß}÷;wàÖ­[tìØ1K¡ÈæÍ›Ù¼y3&“)WûM—Ÿsô°òjÛ={öäÛo¿Íx¼bÅ víÚE\\•*UÊ2vùòå™–åfnîu>|ü |t‡ª‘U³Íx÷~M&o½õ‹-Ê´üÈ‘#„„„°fͶoßN¹råðóóË4W§Nbùòå,_¾“É”£1"""""ENéÒ£GèQàâbt") V¯6wŒâ%9–-ƒ3`ûvsÁÏ!з¯Þc!¨€/¾¼ÍÛüÈ„Î;¼Ã|@úà‡µP1—ˆˆˆˆˆˆˆˆäŒ——¾¾¾ <˜Ö­[óøãID¤ÈS‡ɵÐÐP¶lÙB… ˜={6gΜáÌ™3DDDP®\96nÜHhhh–çEGGÅåË—9~ü8:uÌmÀÒsêÔ)\]]Ù´iW¯^eÓ¦MìÞ½;WÛµkÇ?üÀ¥K—HII!11‘É“'c2™˜0aBƸ)S¦°~ýz ãÏ?ÿäæÍ›lß¾.]ºdº°ßd2eú6bŽV^m{÷îÝ¯ÍÆ©Q£‰‰‰gsá;w²zõj®^½š1_¹™›ì·Õ[W³Ów',–eŸ1»ýFDD°hÑ"œYºt)gÏžåÆlß¾gŸ}–ƒ2qâD¾ùæILL$%%…óçÏEÛ¶msàäÑÑæ×ùý÷U RD•¢^x±”¥ç8#ÁbS—ºxãM$‘¤’jtL)þûßÿR±bE|}}Ž""R,¨ Dr-½ÃÂäÉ“éÛ·/ŽŽŽ8::Ò¯_?>ÿüóLcîAÇŽ)W®Õ«Wϸè?...cLTTS§N¥M›6888ЦM¦Nš«Œ£FbÆŒ<ñÄØÙÙáââˆ#Ø¿Ƹ ðùçŸ3lØ0\\\°µµ¥E‹|÷Ýw¹ÚçòsŽV^mûÎׯÓÓ“°°0àŸ×îî±íÛ·ÇÁÁ!cYnæ&»óaY«eØW¶‡¡÷Ïx÷~gÍšÀâÅ‹éÖ­UªT¡L™2´hÑ"£¸#½óI5ؾ};Ÿ}öóçÏçÂ… tìØ‘uëÖåxŒˆˆˆˆH‘dk ãÆÁìÙpæŒÑiDD¤¸[¹ªV…ýËè$ò¨bcÍ^j׆ˆs'#G 2¼¼ ”>Ž(.ªQøƒ?XÁ zÐW\ $3è=¤ˆˆˆˆˆˆˆˆÜ›½½=sçÎeõêÕÌ›7Ïè8""Ež>‘\;|ø0;v̲.½ëD|||–u™»ººpõêÕŒe ´jÕ*ÓØ»ßÏ’%Kðöö&**ŠÓ§O“ššù®d7oÞÌø}ú±¤wÉ+ù9G+¯¶}÷vÚ´iÀÑ£G³Œõôô̲,7ss÷ù°ŒeÌe.ÓoM‡³÷ΘÝ~Ó ^xᬬ¬°´´ÄÒÒ’R¥JQ§NLû‹ˆˆÀÕÕ•õë×ÊÛo¿M½zõh×®gþ¾ .'cDDDDDЬ¾}¡|y˜>Ýè$""RÜEEAÇŽ*(ª®^…ðphÒ<<à?Ìï (ÈÜ%DŠ-K,ñÁ‡h¢‰'ž·x‹iL£:ÕéNwbˆÁĽ;m‹ˆˆˆˆˆˆˆHÉÕ²eK†ÊðáÃ9qâ„ÑqDDŠ4}Ê&¦téÒ™[XXäË~ÆÉdbàÀÄÇÇ“””„ÉdâÚµkù²¿¼”ŸsTPó§Š+æÙ¶þäOüðãÞ¡ý­ö¹Þ¯Édþà1--´´4nß¾ÍíÛ·3–ܺu www~ûí7¶mÛÆ¤I“èÚµ+¬]»ÿ)²J—??˜6 ®_7:ˆˆWgÏÂO?ÑI$·âãaìXsÁ‡¿?<ý4ìÙ»v¯¯ù½„”(õ¨GAœà YÈE.â7 iH0Á\à‚ÑEDDDDDDD¤™8q"ŽŽŽ 8Ðè(""Eš B$×êׯÀš5k²¬[µj 4x¨m§w­ˆÍ´|Û¶m9ÞÆ‘#G ¢~ýú…6lÈ26=ç÷ßßm¦OÜÝmä^òsŽŒ¶uëÖL7oÞ @­ZµrôüÜÌMúù°uÛVzÑ‹ÇyœILÊÕù®qãÆüôÓO˜L¦{~§³²²ÂÝÝÑ£GI\\ëׯÏÕ‘"ëwàÆ øúk£“ˆˆHqµjXY——ÑI$'ÒÒÌ]¼½¡aCX¾Ü\râÌŸÍšP [léF7¢‰f»hC>æcœq¦½ØÃ£#ŠˆˆˆˆˆˆˆH!aggÇܹsY³f _ë3I‘‡¦‚ɵ7Þx€áÇ3þ|Î;ǹsç˜;w.#GŽÌ4&·:vìÀ°aÃØºu+ׯ_gëÖ­ 6,ÇÛ¨^½:“&MâÂ… \ºt‰ÈÈH|}}³ŒíÙ³'#GŽdúôéœ8q‚äädvîÜÉ«¯¾š1ÎÑÑ€ÈÈHnÞ¼ùÀ ù9GF:t(›7oæÚµklÚ´)£†Oï䘛¹I?zýÞ‹í··‘Á®­»ru>¤ ÁÁP»6téb^öý÷pø0@¥JÆæ“BË 7f2““œ$Œ0ö²—gx†æ4'œpnpÃèˆ""""""""b0wwwˆ¿¿?ýõ—ÑqDDŠ$+£Há“Þ 
#;&“‰ádzzõj¶nÝJïÞ½³ŒñôôÌ(È­1cÆðõ×_sôèQZ·n±¼sçÎ=z+«Ÿ²¾¾¾Œ3†‰'2qâÄŒå½zõbþüù™Æ:”5kÖ°aÆ Â!C²Ýfûöí™;wn–"Ž;;JÜ)?çÈhÏ<ó žžž™–¹¸¸£ççfnÆŒÃœßæpaÈ-¦·rw>¤ëÛ·/{öìaêÔ© 0à¾c·nÝš¥Jº·Þz+ÇcDDDDDмÀÃvï77£ÓˆˆHq’œ 11bt¹—Ý»áóÏ!2Ê—‡þýaÐ ¨YÓèdRÄ”£¾íf7a„1„!Œa =è?þ4¢‘Ñ1EDDDDDDDÄ “&MbíÚµ 6Œ%K–GD¤ÈQ‡É5[[[¢££ ¢iÓ¦”.]š2eÊдiS‚ƒƒY·n666µíªU«²eË:t耕*UÂ××—O?ý€Š+>p#GŽ$$$„zõêakkKíÚµù裈ˆˆÈ2ÖÚÚš~øŒc±··ÇÓÓ“•+WfŒ eРAÔ¬YkkëfÈÏ92Ú‚ èß¿?+VÄÞÞžÎ;³eË*åðN€¹™›ª6”YVÇ=ŽØÍ}¸óáNS¦L!&&†®]»âì쌵µ5eÊ”¡qãÆŒ9’½{÷°cÇú÷ïO­Zµ°¶¶¦J•*¸»»3sæLfü}w䜌)òZµ‚Æá«¯ŒN"""ÅÍ?µkСƒÑIäN·oÃÊ•àé Í›ÃÁƒþ AA*‘Gæ†ó™ÏŸüÉ8Ʊžõ<É“xãM$‘Üâ–ÑEDDDDDDD¤€ÙÛÛNdd$ß~û­ÑqDDŠu‘ ÷êv‘[[[rÔâ~ÛÍn]½zõXµjU¦eÓ¦M qãÆÜŸ¥¥%ï¾û.ï¾ûnŽögmm}ÏñéÊ—/ß‹ü³Ûn~ÎQnÆæÕ¶ÓÙÛÛ3kÖ,fÍšõÐÛÍéÜøá‡ÉÚÄÁæq¼æ˜±ü^çCNŽç¥—^⥗^ºï˜çŸžçŸþ‘Ljˆˆˆˆ ýûC` „†BéÒF§‘â"* žy\\ŒN"`îØ²d‰¹èã×_¡eKsaHÇŽpŸŽÒ"ë1#€F3šù‘pÂyשJUzÑ‹Á ¦5ŒŽ)""""""""䥗^¢wïÞ¼óÎ;xzzæøÕ""¢!RuéÒ…­[·råÊN:ÅW_}Å{ï½Àk¯½fp:)(Ó™Î2–áú¡+‡¶Òù """"b”×_‡ë×aõj£“ˆˆHqa2™ B||ŒN"ýe.ütq__sW 6Öüú¨DòY)Já…KYÊ1Žá‹/³™M-já7QDa"÷76‘¢'44KKKFŽit‘"E!Rè|ÿý÷´nÝšòåËãä䄯¯/W®\ÁÍÍ ___£ãIø‰ŸÅ( dûøí:DDDDDŒôøãàé ‹DDDŠ‹={ 1Q!FÚ·üüÀÕ¾øÂÜì?`þ|ÈA—f‘üàŒ3’H"ßð éL}êL0ç8gpBÉOåË—çË/¿dÞ¼yüðÃFÇ)2T"…ÎêÕ«yùå—©V­666Ô©S‡Ñ£Góã?bccct<Ég¸@zàÿá?:DDDDD ƒ×_7w¹rÅè$""RDE“<ý´ÑIJžôÎÍšÁÆ æ_œŒN'€ 6t£ÑDó?þÇk¼F0Á¸àBwºCŒÑEDDDDDDD$ŸtìØ‘=z0`À.]ºdt‘"A!Rè´oßžµk×ròäI’““ùý÷ß™4iåÊ•3:Z‰f2™0™LùºÛÜæMÞ$4³K,u>ˆˆˆˆˆ¯¾ ©© ;ñˆˆH^ˆŠ‚ÎÁÂÂè$%Cr²¹óÇ“O‚‡\¼ßññàïeÊPäžžà ‚âǘÂ~ã7¼ñÆ 7 ç׌Ž(""""""""ylÚ´iܺu‹ÿüç?FG)T""…ÆxƳ |Ã7T¡ŠÑqDDDDD$]¥Jæ H£¢ŒN"""EÝÉ“ðË/æ.’¿.\€ñã¡zuðõ…瞃ýûÿ颂)BÊR_|ÙÃv±‹æ4g#pÆ?üØÏ~£#ŠˆˆˆˆˆˆˆH©R¥ “'OæË/¿$66Öè8""…ž B È¿þõ¯,Ëðóó£N:ØÚÚbggG“&Mð÷÷ççŸ6 iîÄÅÅaaa««kƲ¢tLÙå·°°ÈômeeÅã?NçÎÙ²e‹qa ’_s´ |Â'„†;îY¶ý(fºß잟Ç’SÏ>û,}ôQìKDDDDJX½nÝ2:‰ˆˆeQQ`g/¼`t’â+1FŽ„š5!, üüàØ1ˆˆ0w )âÜpc&39Á Ba+[iJSšÓœp¹ÉM£#ŠˆˆˆˆˆˆˆÈ#zã7èСo¿ý6ÉÉÉFÇ)ÔTb€'NÂàÁƒ3-ß»w/Íš5#<<œ?þøƒ””’’’8pàS¦Lɶ€¤ /0Ïɾ6oÞ @›6m€ÂLw»;vÒÒÒøë¯¿X¹r%/¼ðkÖ¬É×L…ý5ÎNnçèOþäÿø?ºÑA Ê}ð‡`äyV DHH§N2:Šˆˆˆˆ:Á¥K°m›ÑIDD¤(‹Š‚¶m¡ti£“?GŽ€¿?Ô«‹Á¨QðÇðñÇðØcF§És¨€/¾àÑDS›Ú¼Ã;¸âÊXÆr”£FG‘G0mÚ´ŒëmEDäÞTb€   nß¾M÷îÝ3-5j—/_¦iÓ¦¬[·ŽóçÏ“œœL||<Ó§OçÙgŸ5(qÎÝ],PÔŽé~Å&“ “ÉDrr2û÷ïçå—_æöíÛpJcåõ%‘Äk¼FUª2‹Y÷Üæ£x˜mäÅ~ 
b›÷Ò½{wRSS™4iRìODDDDJ€:u n]ˆ‰1:‰ˆˆU7nÀÆæ®S’wöî…^½ AXµ ‚‚ !¡|y£Ó‰ä»R” /–²”cc#XÄ"êRo¼‰$’4ÒŒŽ)""""""""¹T³fM>øà>ùä:dt‘BK!ìúõë,X°€_|;;»LëvìØÀ²eËhÛ¶-•*UÂÆÆ†úõë3xð`~úé'#"ç˜ÉdbË–-´nÝ(ZÇ”]þìØØØðä“O2{ölöïß_ ù ƒ¼ž#&Þæm~çwV°{ìó>t åàà@›6m˜7o7oÞ4:Žˆˆˆˆ/¾?þht )ªÖ¯‡›7¡}{£“±±æâš§Ÿ†ýûaöl8|ØÜ%DX¤„r‰8ÊQV°€ô &5 $3œ18¡ˆˆˆˆˆˆˆˆäƈ#hÔ¨,°›1‹ˆ5*)`ßÿ=—/_ÆËË+˺Š+”””£mYXXdúýßébccéÙ³'...X[[S¹reÚ¶mËúõë³Ýž……III :”Ç{ ++«ïëÿûçÎÃÉɉºuëæÛ1¥¤¤L³fͰ³³ÃÎÎŽfÍš­[·²=¦7nàççG•*U([¶,¯¾ú* ™Æf—?'²,[¸p!žžžT¨P[[[ê֭˘1c¸|ùrƘÔÔTf̘A‹-¨Zµ*666899ѵkWbþ¾ãnQxeŽê|U‡Ei‹¸Úé*íê¶Ë2GwfÉnYZZŸ|ò ®®®ØÚÚÒ°aCæÌ™sÏã¾óñƒŽ5»ýæf®³s¿c¹×÷Ýrrn¥óòòââÅ‹DEEå(ŸˆˆˆˆÈ½ø"üü3\¹bt)Š¢¢à¹çà±ÇŒNRtݾmžÇ矸xV®„={Ì]B,-N(R(Xb‰>DM<ñ¼Å[LcÕ©NwºC &tˆˆˆˆˆˆˆHageeÅÌ™3‰eþüùFÇ)”TRÀ6nÜ@“&M²¬ëܹ3-[¶¤_¿~Ì›7ýû÷“–öð­Ì=<û,dâĉ|óÍ7’˜˜HJJ çÏŸ'**жmÛæxÞ~jŽ~YD©ÈR´IlÃÙÿïÞsô ÑÑÑDEEqùòeŽ?N§N˜2eÊ}Ÿ—“cÍÎÃÌõƒÜ½ÿU«Vaaa••_ýuƸܜ[é4hÀŽ;*›ˆˆˆˆHU«Býúæ PEDDrã§Ÿàôiøûÿo$‡®]ƒÏ>ƒZµ`ÀhÙ~ýõŸn+"’c¥)M7ºK,»ØEÚð1ㄽèÅ^öQDDDDDDDDîa„ ”)S†Ñ£GED¤ÐQAH;uê*TȲÎÁÁ+VpàÀ‚‚‚ø÷¿ÿ««+W®\aîܹ<ýôÓìß¿?Wû5j3fÌà‰'žÀÎÎFŒpÏmM:•öíÛãàà«}mÙ²€Ö­[çÛ1-Z´€É“'Ó·o_qtt¤_¿~|þùç™ÆÜ}LmÚ´ÁÁÁOOOÂÂÂˆŠŠºoþû©R¥ Ÿ~ú)•+WÎX6kÖ,/^L·nݨR¥ eÊ”¡E‹ ß~û-5jÔ`ûöí|öÙgÌŸ?Ÿ .бcGÖ­[—ã91ú5¾ŸìæèË%_ÂJhhjȺšëî;GAÇŽ)W®Õ«WÏ(Š‹‹ËÍaåØÃÌun$$$гgOL&~ø!ÏÝñ¡~nέt+Vþù¹#""""’'žTt,""¹®®æò`W®À„ æ9?^þøfÍ2gŠÈ#qÙÌä$' #Œ=ìáiž¦9Í 'œÜ0:¢ˆˆˆˆˆˆˆˆÜ¡lÙ²|ùå—Ì›7˜˜£ãˆˆ**)`—.]ÀÞÞþžc7nL@@Ë—/çèÑ£ÄÇÇãååÅ¥K—3fLŽ÷µdɼ½½‰ŠŠâôéÓ¤¦¦fZóæÍlŸçéé™ã}¤‹çôéÓT­Z•† fYŸWÇtøða:vì˜e]zwˆøøø,ë<<¥¹OÎ IDAT<2=NïpqôèÑå‡:9œ¾yعΩäädºvíÊÅ‹iÕªãÆË´>7çVºôŸ3é?wDDDDDòÄóÏ›ïòž–ft)J¢¢ sg£S~W¯Bp°¹#Hp0¼ù&üö›¹Kˆ³³ÑéDŠr”Ã_ö³Ÿ]ì¢ÂœqÆ?þÇÿŒŽ(""""""""ëС]ºtaðàÁ|½žˆHq¢‚–Þäúõë9~NýúõY°`[·nÍñóÆÉdbàÀÄÇÇ“””„ÉdâÚµk÷}^zWÜØ¼y3óÎ{Lù%7ù«U«FXX:tàÂ… ¼÷Þ{ëL&iii¤¥¥qûömnß¾±àÖ­[¸»»óÛo¿±mÛ6&MšD×®]qpp`íÚµøûûç(wa}ï5GCJZ«4ð´?çÎãܹsÌ;—‘#Gfs§¡C‡²yóf®]»Æ¦M›2:pøü}WÂÜv8sqJ¿~ýHMMÍè€1xð`:uêĬY³HHH ))‰ääd>ÌW_}…»»{ƾ.\HbbbƼ,[¶ Óqö×ø~2æè•TB*‡Bï54ÏUNæ(?ÝïX³“×sîÀ 4€·Þz‹×_=Ûq¹9·Ò¥ÿœyþùç)£ˆˆˆˆH&¶¶P»6.^¼HïÞ½qttÄÑÑ‘¾}ûréÒ%<==3Š=îôÌ3ÏàééIÙ²eyá…8~ü8...Ü7ÿƒ|øá‡”)S†åË—³k×.úöíËСC9}ú4  V­ZØÙÙQºti4h€¯¯/;vìÌ]KzöìIõêÕ3æ% 0äd> Ãkü 
]>é߀)ÂDë]­s5Gùé~󚼞ët¯½ö7nÜ`áÂ…9îÎó0ó–þsæÅ_|¤Œ"""""Y4j¤!""’3W®À–-ð÷YJ¼¿þ2‚¸ºÂœ9ðÁ`.)_Þàp"r/u¨CAüÉŸ,f17¹‰7Þ4 ÁsžóFG)1J•*ÅôéÓùñljŒŒ4:ŽˆˆáTRÀ:wîL¹r娰aC–u{÷îå½÷Þ£eË–TªT KKKìííiÒ¤ #FŒ`ß¾}<ýôÓ™žÊ Aƒ¨Y³&ÖÖÖ™Ö9’êÕ«‡­­-µk׿£>"""⡲ßk_é#<<<²\PŸ×ÇdkkKtt4AAA4mÚ”Ò¥KS¦Lš6mJpp0ëÖ­Ã&›; .X°€þýûS±bEìííéܹ3[¶l¡R¥J÷Íÿ NNN 6 “Éĸqã˜2e 111tíÚggg¬­­)S¦ 7fäÈ‘ìÝ»€;vпjÕª…µµ5UªTÁÝÝ™3g2cÆŒÍGaxï'ú:ö¥vbmL®ç(?Ýo^³“×sîðáÃ9›ÛyÛ°a*TÈè„#""""’gT"""9µv-¤¥™;„”dǃ¿¿¹dÑ"øôSs!H@ØÙNDrÈ[ºÑh¢ù•_ù7ÿ&˜`œq¦;݉!Æèˆ""""""""%³Ï>KŸ>}9r$×®]3:Žˆˆ¡,L&ÓC÷³Þ½{7nnny¨$6láááœ?{{{£ã<²Þ½{3þ|>ÿüsFŒatœLÒ‹îwšæü…ÅÃÌѮЊV˜0K,åÑ Òõëש\¹2$44Ôè8ENAþýfaaÁ’%KèÞ½{¾l?<<<£ÃˆHwÌ?k–²Ôà$"Rä}ý5ôëׯƒ••Ñi¤Xºt)=zô¸ï¿¿óJ~¿_Oo¾tiþÿ}©÷ëR"ôì ‰‰°q£ÑIŒqìL˜s炳3Œ}ú@67µ‘¢é*WYÌbf0ƒ8âpà _|y“7±§è$"RåôßbºB¤ø*Èÿÿ‘üsþüy4h@ÿþý 6:ŽÜ‡ÞIqV®7U‡Œ;KKËbóƒ-½{DëÖ­ NòpŠzþ‚Û9J&™.tá"YËZƒ`É’%XYY`t)Ž5‚”øýw£“ˆˆHa––fîR»—&&ÂàÁP¿>ÄÄÀ—_ÂáÃàë«b‘b¦,eñÅ—½ìe»pÃá Ç 'üðc?ûŽ("""""""R,U®\™ñãÇóù石¿þFDJ.„ÀÉɉѣG3cÆ ££ä‰„„L&S‘½3JQÏ_r3G·¹M/zñ ¿°ŠU8ã\ ån_|ñ£G¦ZµjFG‘â¨aC°´„ƒN"""…Ù¶mpîtìht’‚sö,Œk.Yµ &M‚_5wÖ²¶6:ˆä37ܘÉLNr’ñŒ'†šÒ”æ4g>ó¹Å-£#Šˆˆˆˆˆˆˆ+ä™gžaÈ!ÒÉ^D¤0RAˆAùù矎!’çF2’¬ ’Hžâ)£ã”X?ÿü3~ø¡Ñ1DDDD¤¸*]jÔ€ß~3:‰ˆˆfQQðÄæâˆâîÜ9s!ˆ«+Ì™~hîâï¶¶F§‘V øãÏoüF4ÑÔ¦6ýéOuª3–±$`tD‘b¡T©RLŸ>ØØX–,YbtC¨ DŠ5“ɤªÏô13•©,d!ÞxGDDDDDò““œ:et )ÌV®„NŒN‘¿ÎŸ‡À@¨Sfφ>€„0PŠH‰VŠRxáÅR–rŒcŒ`‹XDêà7‘D’FšÑ1EDDDDDDDŠ´æÍ›Ó¯_?FŒÁåË—Ž#"RàT""yâ+¾â> ”PºÑÍè8"""""’ßœœàäI£SˆˆHaõûïæ>>F'ÉW¯Bp°¹dút1Ž1‚”)ct:)„œp"€Žr”¬ =¨IM ä,g N("""""""Rt“ššÊÇlt‘§‚yd+YÉ`ó1”¡FÇ‘‚P­š:„ˆˆˆÙæÍàà`î2{6œ9+V@¥JðüóF§Ë[×®™ AjÔ€I“`øps!H` ”-kt:),±Ä¢‰æ‡x‹·˜Æ4\p¡;݉!ê|."""""""’•*Uâã?&,,Œ}ûöGD¤@© DDɶЃ `ðÑqDDDäo±Äb‰%w|Eþýuç2K,‰%Öè¸"RU«¦!""bvé\¿kÖÀ€ðøã0q"Ô¬ ‡.o\¿n>¦5 (ÈÜäèQs!H¹rF§‘"ª>õ "ˆDYÈBNro¼iD#‚ æ"Ž("""RäÅÆÆbii‰……EÆwdd$‘‘‘™–YZZ«ÏKäÿgïÎã¢*÷?€†YdÙ7G%/feämKqµ K «[Ѯխ´~vÕV»mšeiZ™÷^î-EÒJ¬LËJ4+°RAÙw}Ÿç÷Í43pfæ°|Þ¾|1s8sÎ÷™s˜ç™g%"¢,11'NIJeË䅈Ȫ8 „ˆzí(Ž"î°AîpˆˆˆHG €¢ÇýP aVˆˆˆÿŽBg/&"ò\\:~¶·juGÞpþ<‘L˜Ë–GÊgo´´6£Gw yàŽ ÿø‚‘Ù8Á ñˆÇaF:Òq5®Æ3x!Áݸ'pBˆˆ¬°°0(ÚK „…±½„ˆˆh ³±±Á† pðàAüïÿ“;""«á€"ê• d`fár\ŽíØ[ØÊéð†7¦`J·y´ l0Sá o+FFDƒ†¿?ÐÔÔÑá—ˆˆ†6Í€]B­­ ;UÜv›UÃêµHNÂÃGæÍNž~ðð;:"Ä"MØ„àe¼Œoñ-&b".ÃeØŒÍh@ƒÜ! 
(ÞÞÞ˜2e lm»i/±±ÁÔ©SáíÍö""¢îòË/ÇÍ7ߌÇ{ MMMr‡CDdBDFmÅV<†Ç  ?ãïœA b0ãð1>†#eŠˆˆˆº“€„.ù¸¡}ˆˆzÅϯãga¡¼q‘ü 1dýzËÆÑ“ï¿Þ|³çýÒÒ€K/nº©ã篿›6¾¾–‘ˆèÃ1‰HÄ/ø‡p*¨ð@p7îÆ¯øUˆˆŒ„„ˆV:NH`{ Ñ`±víZ”––âµ×^“;""«à€"2¨5xãe¼Œ»q·¶3i>ò1Ó„ ìÃ>¸@bƒ?YÝu¸v°3ú{[ØbæY1""TFŽìøY^.oDD$¿ž„ØÚO> DG['C22€éÓŽ7¼ÏáÃÀäÉ@L œ8$%*•uc%"ê$ QHBr‘‹XÏð.Ä…˜ŽéHF2ÚÐ&wˆDDDDýÚu×];»nÚKlm1oÛKˆˆˆ‹€€,_¾Ï?ÿ<ŠŠŠä‡ˆÈâ8 „ˆ zo¢:V ¹w¢ŘŽépƒöaÜà&s”DDDÔw¸#±…ØÁqˆÃp —!2"4ëë僈ˆä×Ý€{{àâ‹U«¬Og99À´i@ccÇà”¿ÿ]ÿ÷?üÐ1XeòdÀÉ 8zس˜0Ažx‰ˆŒð…/–c9²‘Ïð”Pb!ÁXÈCžÜ!õKîîîˆ58(ÄÎÎqqq>œí%DDDƒÉ£> ///üßÿýŸÜ¡Y„Q hÀKxI;«˜j¼÷q ®jìÇ~xÂSæ(‰ˆˆHŠ›q3ÚÑÞe{;Úq n‘!""4†   ¡AîHˆˆHnÆ„(€£#ð¿ÿu ‘CyyÇ`ª* ­­ãÿW_û÷¿þ ,\\qEÇÇ/¾èØ!O¬DDÙÀшF’ð;~Ç,Á»x£0 qˆCÒ´«~Q‡›o¾ííÚKÚÛqË-l/!""lœñ /`Û¶m8zô¨ÜáY„QoãmÔ Fo›jœÁD Þð–)2"""2Õl̆ ºvÐs†3fb¦ Ñ ¡PÎÎ\!„ˆˆ: "ðÎ;@h¨UÃÑjhfÏrsÖÖ?·ÛÚ·Ý\x!pútÇj GŽS§Ê'QŒÆh¬ÅZä!;°MhÂtLǸ/âET Bˆˆú…Ù³gÃÅÀ„ÎÎΘ9“í%DDDƒÑM7Ý„ÈÈH,[¶ Bpò "¼8 „ˆô4£/âEƒ3‰«¡ÆNìÄݸ›³‹ NpÂ,€´Ûìa…Xg8Ë ..BDD,ô·ÙÙwÝÜt“<1µ¶sç?þ¨?ÚÛ¢"à¾û€cÇ€9s䉑ˆÈŒáˆxÄc?öã$NbfáY<‹@b!â|#é8ó1÷ã~í âDDDDƒ…““,Xï¯öööX¸p!œÙ^BDD4X­_¿ßÿ=’’’ä…ˆÈb8 „ˆôlÅÖng SC­ØŠeXÆA!DDDÄb,F Z´Ï[ÑŠÅX,cDD4h Æ!DDÔÁÉéÏÇvvÀ¨QÀºuòÄ"ð·¿mÝthþøã®ƒEˆˆ¿à/Xõ(D!Öc=NᢅËp6c3êa¸ Ÿ ìÆn¼…·‡8£û T‹/FK‹N{Ik+/f{ Ñ`6qâD,Y²>ú(䇈È"8 „ˆ´ZÑŠçðÔP÷¸ïlÀ1³BTDDDÔWÓ0 #0Bû\ %¦bªŒÑ ÁBˆˆHcذ?+@r²þ6kzøaà?ÿé~0ˆ@q1ðöÛÖ‹‹ˆÈÊÜà†D$âN éˆ@ÂCð‡?îÆÝÈ@†Þþoá-ØÃpQˆB)JeŠžˆˆˆÈü¦M›†#tÚK”JLÊö""¢ÁníÚµ¨©©Á+¯¼"w(DDÁ!D¤õ>@1Š®üa [ÀŸ»° —á2k†GDDD½d [,Æb8üñïÜ;ØÉ ..gÒ!"" #O:ƒ¬_\|±oiγxhDDý…<° Ëp§ñ >üð7ü £0 ÍÐ/Û·¢E(Â_ñW|‹oeŠ˜ˆˆˆÈ¼-Z„––´´´`ñb¶— ?ü0¼½½ñÔSOÉ ‘Ùq˜;Q?ÔÐЀœœäçç#//¹¹¹ÈËË!88AAA DHH†iÜû`v 9z«ƒØÃ­hÅdLÆsxWâÊ>Ÿ‡ˆˆˆ,¯¡¡………(..FQQŠŠŠP\R —¿wÌÚœòJ ~ðù~~~ðó󃯯/üýýÍR¦ ¢!¦­ °µ•; "¢A«¶¶­­­8þòòòPQQ¡Ýذa A`` àÈ‘#ÈÉÉACCƒvŸ#F ((Èà`‘   øûûwÛˆ¡†ÏâY( €€€=ìц6LÇt<§˽!DDD$Yyy¹v‡î`’’ ´´ùùùÚŽ‚ P(àíí \à|à“ÔOPRR‚ÒÒRÙ“]]]oooÀÇÇGoЈ槗——ÕÓNDýT{;„¡c@nMM jkkQSSƒóçÏ£¶¶Vû¼¶¶çÏŸGMM šššPWW‡ÆÆF455ô¡ù©t]hMt~¬¡T*{<ž³³3œœœzL{EEÖ£ÁÖ¹]öBàüùózÛt¸èP‘:X¥³…þB ÜÖ§ãæ†\”zz¢ØÓ-puu…“½=ÜËÊàñÓOp?{nnnÚÿpww‡››ÜÝÝ9`šˆ†Œ …Fߎv¨¡Æ<ÌÛx÷à+F÷§ÖÖV”––v䡸¡©)))AKK‹öuŽŽŽðööF`` Fމüü||ùå—(((@mퟃ`4u(ºõ"šA#šú0 
""@‘––†Ý»wcÏž=(--…ŸŸ`òäÉðööF\\æÍ›‡èèh8;;Ë1YÊ¢E‹ðꫯbåÊ•HMM•;""³á€"3«ªªBaa!ŠŠŠììl½ç¹¹¹Ú™ŽÆw•J•J…«¯¾þþþÚçš…BÑå<Ïñûï¿cß¾}ÈËËÓΩ{??¿.çøiìOøÝçw( €ì€<'0c¬òž uºåC?«ªª››«7ÐÃÁÁ#FŒ€R©„¿¿?BBBpå•WjózÍÏ   ®CWJ;÷Ñ£GQUU…¼¼<½ÏÝùœÝž›ˆ!¢A¤¦¦•••¨¬¬DEE…ö±¡ÿyhVâèÌÙÙ¹Ë`ggg¸¸¸`Ĉptt„»»;ììì´«]¸ººÂÉÉ ÎÎÎz+aØÙÙiWÑШ1lØ08::Zí}²¦ææfí¤(MMMhllÐq­ÚÚÚôÑ.-E}{;*[[ÑÖÖ†ªª*´··£¶¦vÍÍhih@II » ÚÑ·3ÍÊ%ÚA"žžžFÿ1BûXs­ˆˆ‚ب]1ÜñÇ¿{q/Îá^À P kÛEo455¡²²²Ûz‘¢¢"äääèå¹NNNÚz¥R‰‹.º3fÌèRGáëëktÖïžÎ}æÌuiÛqrrê±^¤§s‘åTVVâÀسgvíÚ…ÚÚZ„‡‡ãÞ{ïE\\"":&ÅÌÎÎÆž={œœŒ÷Þ{ŽŽŽ˜6mâââ0wî\øúúÊœ"""2'…B—_~S§NÅ_|k¯½V̂BˆL ;ÃРÎ5•J¥ÞŒèèh½çÁÁÁ°³ëÝŸ¡³³³vP‡1UUUcÍÌÌDZZÎ;µZ ì0¶}T«PëZ‹-ª-z±j‘4=u(Ðü,--í¶3Ãøñã-Ú¡@©TjÏÓM9ÈXÇŒ“'OjgèÔ]uD7=Æ~ÀÃãÏi!"p@õcuuu(,,DYY™v6qÍ㢢"”––ê òÐíä 666;ý‡††jW‘ðððÀðáÃõV™Ð Ð ô Þstt´Ê`—ÖÖV½>º«»è©®®Fee%JJJð믿êÝ?ºe`°³³Ó(2räHøùùigžïüØÕÕÕâé$"2¤HBR·ƒA:{ /¡¥ØŒÍ°ë¦©±»º„΃=tiÚV4.T*•Åê4õþþþÚŽ¡Æt7©Fff&>ÌI5ˆˆˆdtîÜ9ìÞ½©©©8xð lmm…gžy7Üpº¼F¥RaÙ²eX¶lÊË˱wï^¤¦¦â‘GÁ½÷Þ‹‰'"667ÝtÆ'CªˆˆˆÈܦL™‚˜˜¬X±ßÿ½Áɺ‰ˆ¶Hý¡¥¥åååFWöÈÎÎFUU•vM#f°DDD„Þª!!!²7ä*•JDDDmÄФùÇŠQòY Ê*ËP8¶#ÍiiiÚôktNsç•FúCš‰ˆˆ,Í\"##»t ÄðáÃeJY÷¤ FzsìØ1¤¦¦özfMزó9QÿÑÖ°³3YYMM òòòŸŸ‚‚äåå¡  %%%(++CQQ‘v%]^^^zð'M𤷢CçÿJ¥R¦’µÙÛÛk¯{oUUUu»²Lii)Nž<‰ƒ¢¤¤z¯6l˜öÞ‘:â IDAT9r$|}}µ„„   ¸¹¹õ5¹DDz2‘‰v^éJ³ª¸ :&¤PC6´á=¼‡_*~Á{@qa±ä•N5ßï5í*i`'Õ ""ê233‘œœŒÔÔT;v J¥ÑÑÑØ²e æÏŸoÒê^^^X²d –,Y‚ÆÆF¤¥¥!55o¿ý6Ö¬Y•J…ØØXÄÇÇ#22’G‰ˆˆ°—^z 'NDrr2.\(w8DD}Æ^4d[-Có\»Zô& ­ì¡R©E§íÌW˜`^×}šššPXXhð½;vìX«¢t<Ò—UQˆˆˆ,©»Yufptt„§§ç€îÌ`næ˜YSSnãÌšDW!"3kllDvv6rssQPP€üü|½ÇË...Ö–ÅT*|}}áëë«íTïããooo– È¢4„¥jiiAYYJJJP\\ŒÒÒRk69s‡BNN´¯sssÓÑ  D@@‚ƒƒ1zôh899Y"‰D4H]«Q‡:T4W ï|²*³[•‹ÂšB×£´±åmå8ß~5ê4Ø6n¼€ôútÜóà=ða½•NNªADDd9ííí8rä’““ñÑG!??!!!˜1cV­Z…3fÀÁÁ¡ÏçqvvF\\âââ°qãF9r©©©øøãñúë¯cäÈ‘˜9s&âããc•Õ-‰ˆˆÈ|.ºè",^¼+V¬ÀüùóÍR~ "’{eÓ  ™mÉØÊyyyhmýs¹s¥R©¨0~üxÄÅÅé \ 2 =qrrê±áB·3§î5ÈÎÎFZZZ—Æ Íûol¥???ΦADDfÑSúægii)ÚÛÿœ SwFFvf°œþ0³¦¿¿ÿ èK$!¢^Ð þ4ô_wÂŽÎyùĉõêX‡@™ƒƒÐ㾺uŸºup………øé§Ÿº¬l¬[÷Öùÿ¨Q£ø7C4Ęc¥Ó ý/Ôÿ.­ô‡Ÿ÷+žïŸ+œTƒˆˆHšúúz|ñÅHNNFJJ ª««Ž›o¾±±±_­ÃÖÖQQQˆŠŠÂÚµk‘™™‰ÔÔTìÙ³óæÍƒ³³3®½öZÄÇÇcîܹ\Í‹ˆˆh€xöÙgqÁ`óæÍxà䇈¨O8 „ú½ææf]ÙãôéÓ¨©©Ñ@×4rFFFê=gŶùIéÌ©ÛÙC÷Z~óÍ7]:p:::" ÀèJ#cÆŒÁðáld""ÊÌÑ™Áßß_[NÐm 
d>ÓÏpfM¢~¬¥àŒ9Dd@MM ~ûí7œ|çÎCCCƒv¥RÙe°ˆîó6HY9;3DGG³3™gÖ$2!€ÚZ!Brrrpüøq;v ÇÇñãÇQRR £3º¦#ytt´¶#yhh(¿{ÉÌÆÆ!!! ÁÔ©Sõ~×ÞÞŽ³gÏâ×_Õ®èóÍ7ß`ëÖ­ÚÁѾ¾¾¸ôÒKq饗"""—^z)‚ƒƒåH Ñ€ÕוN5ß#U*W:%³à¤DDÔ´··ãÈ‘#HMMÅÇŒS§NaäÈ‘˜9s&–/_Ž˜˜8::ʦd...ˆ‹‹C\\œ6mÉÉÉø÷¿ÿ_|!!!˜1cbcc1cÆ æDDDý€R©Äã?Ž5kÖ 11r‡DDÔ+B½¦Û9ßРÎK‘ëVüªT*DGGë=gç|ꉔ™­4 ïÉÌÌL¤¥¥é BrppÀˆ#º]iÄßßßZÉ#"pØ™†©3k677£¢¢‚3kÒÐRW´·\¥‘hP:{ö¬và‡ægyy9lll†ˆˆ<þøã˜0aƇ   ¹C&¢^°µµÅ˜1c0fÌÄÅÅéý.77¿ýö~ùå?~ÉÉÉxþùç¡V«1räH½" •'D2âJ§4pR ""2§††8p©©©Ø½{7JJJ R©‹­[·šU4lmm…¨¨(¬_¿™™™HNNFjj*6oÞ ¥R‰èèhÄÆÆbþüùpg+‘l–.]Š7ÞxÏ>û,Þ~ûm¹Ã!"ê!ƒZZZŸŸ¯×©^÷qVVΟ?¯Ý_SY«éD¡×Á>$$®®®2¦ˆ† ¥R‰ˆˆ£3Zµ´´ ¼¼ÜàJ#iiiÚÇïíÎÃÂÂàææf­äY;3™ÎÑÑѬ3kæç磦¦FïuœY“úêꎟÀG4(dgg#-- ‡Æ×_œœЖëî¿ÿ~DDD 22žžž2GKDÖŒàà`ÄÄÄh·ÕÕÕáĉ8vìŽ;†]»váÅ_D{{;|}}1yòdDFF"** 'NäjŽ4 566j÷s¥S"é8©S^^޽{÷"55{÷îEcc#&Nœˆ{î¹7ÝtÆ'wˆ7~üxŒ?«W¯Æ¹sç°{÷n¤¦¦âÎ;ïÄÝwߨ¨(ÄÆÆâ†n@@@€Üá )ÎÎÎX³f î¹ç<öØc=z´Ü!™ŒB†(c«(hžŸ;wjµ@×U¢££‘ ·ªgå¡ÂÁÁ¡ÇΚMMM(,,4ø7rìØ±.KŸw^ý¦óà‘àà`ØÙñã–ˆäeŽÎ šÏNC ¯ìÌ@$]_fÖÔý;îËÌšš¿c¢>Ñ Z•G­VãÇÄ¡C‡pðàA>|åååpssCdd$î¾ûnLž<'N„‹‹‹ÜáQ?âêêªåV£®®Nû™rèÐ!<õÔS¨­­…——&OžŒk®¹“'OÆ%—\Âï$+)+¡ªªJïu\é”ȼ8©ÑÐ={ö 99GŽ££#¦M›†W_}sçÎ…¯¯¯Ü!Ê&44Ë–-òeËPYY‰`Ïž=xê©§ðÐC!<<ñññˆ‹‹ë1¯$"""ó¸õÖ[ñÏþÏ<ó Þÿ}¹Ã!"2{(Bº•£†:´çå塵µU»¿R©Ôv`?~<âââô:´‡††²¡Ž†''§g±êüw¦ù[Ǫ́Úyæ*Íß™±•FøwFD½ÅÎ DƒgÖ¤~+„ (MMM8|ø0öìÙƒÿþ÷¿(,,„››.¿ür<ú裈ŒŒÄ_ÿúWvŠ#"“¹ººbòäɘù$ÊÊʰoß>¤¦¦âÁÄwÞ‰k¯½ X°`\\\ä™,È’+±M„ˆºÕ—I5tëE8© e•••8pàöìÙƒ]»v¡¶¶áááHLLD\\\Ÿ¯$¶¯ÁêÕ«‘­rÓM7ÁÑÑÓ¦MC\\æÎ ___¹C&""ðlmm±zõj,Z´+V¬ÀE]$wHDD’q@ˆifÝÕíü­û8''GÛùÓÞÞ^^^FWö`§O¢ÏÑѱÇ«4 ¤†V9|ø0Î;‡††íþJ¥Òà`ÍãÎNEdìÌ@ƒÍôéÓ±ÿ~¹Ã qfM’¤ ðóø½”¨_8{ö,6lØ€wß}õõõ˜9s&þóŸÿ`öìÙì\MDýÊÈ‘#±dÉ,Y²õõõøä“O°}ûvÜqÇX¶lî¸ã<ðÀ •;T’¨¥¥ååå=N‚‘——‡ÖÖVíë:¯tªi éü]+’X7BœTƒˆèOçÎÃîÝ»‘ššŠƒÂÖÖQQQxæ™g8ù„•¨T*,[¶ Ë–-Cyy9öîÝ‹ÔÔT<òÈ#¸÷Þ{1qâDÄÆÆâ¦›n¸qãä—ˆˆhÀŠ×®òßÿþWîpˆˆ$c º™èvÚ64ÛNNêëëµûëvÚV©TˆŽŽ–¥Óvç%NNN1b&Nœˆ `ñâÅf¡F÷|º•žÆöënŸþH¡PŒÙZé‘ó}³ö¹-}>©Çÿâ‹/°aÃÃ…^ˆÓ§OãþûïG^^žzê)‹Å§K©Tj—‹5DÓ©ÝÐJ#iiiÚÇ?Ã:? 
ƒ›››UÒFd ƒ­3ƒTRó`S%„@]]Μ9ƒ””\qÅøðÃc†¨I.;2ƒþPNÑÅ™5¡ÂB!’QYYüqlß¾áááØ±c®»î:Πn·ÝvüqìÛ·‡ÂôéÓ%¿ÖPýÂ`«÷²k¾or\#kœóñÇÇÕW_­·mòäÉX±b^~ùe‹ž[ ___,_¾ÿûßñ¿ÿýÏ>û,.¼ðB,Y²ÿüç?áåå%wˆƒ†”•N õÚ=€¿Ò)ëFȬúSÝ'Õ ¢þ®­­ ß}÷’““ñÑG!??!!!˜1cV­Z…3f˜u2Ñ5kÖàèѣؼy3ÂÂÂÐÔÔ„ü<ò^xá‚•ÈÙÙqqqˆ‹‹ÃÆqäȤ¦¦b×®]xýõ×1räHÌœ9ñññˆ‰‰£££Ü!õ{óçÏǤI“ðÌ3Ï %%Eîpˆˆ$Qˆ>|‹:vìôXiÕßš]_÷ù¹sç´3988 00Ð`'i•J…Ñ£GÃÃÃC晦»Í›7ã…^À‰'0|øp‹Ÿ¯7ûõ'ÝÅl­ôÈù¾YûÜ–>Ÿ”ã+ üôÓO¸è¢‹´Û²²²‰ââb‹ÅfnMMMÚ†CƒßN:…ÚÚZíþº †V±öÊæfÍüM¡P`çÎX¸p¡EŽ¿yóf$&&ZäØý¹:3tî4Üß;3˜ÂœŸ›ÆŽ•ššŠ'Ÿ|?ÿü³YÎ#åœD•5Ê2ƒ¡œbLç™5 öãÌštõÕÀÅ6È õIII¸ñÆ­’W[º¼®)›'%%Yäøºz[^ÿïÿ‹ûî»NNNxñÅqã7ÂÆÆÆöÞ`Z!ÄÞÞMMM½Î ÅŲmïXó}L÷0œ={·Ýv<Øåw×\s ¶oߎàà`‹œ»·Ôj5vìØåË—£¥¥o½õ,X wXý–¹V:5T/"ÇJ§–º¢þ…u#}ÓÓ¤šŸ%%%z«ÞpR éßÅK"cêëëñÅ_ 99)))¨®®Fxx8âââkÑÕ%|||ðË/¿ÀÛÛ[oûÉ“'1~üx–yÌ 33©©©Ø³g¾ýö[8;;ãÚk¯E||<æÎ;àú6YÓÞ½{1gÎ|÷Ýw¸üòËågP°fû‘µõ‹þ¦¢ÒÓÓEzzz_aq•••"##Cìß¿_lÚ´I¬ZµJ$&&Šèèh¡R©„ ý¯T*EDD„ˆ‰‰‰bíÚµ"))I:tHdee‰ööv¹“dv=݉‰‰båÊ•zÛ~ùå1kÖ,áêê*\]]ŬY³Ä/¿üb–óéî—››+æÎ+\]]…···¸ùæ›Eyyy—}wïÞ-®ºê*áèè(BBBÄC=$jjjôŽ¥ùÿå—_ !„رc‡ÞvÝ´Íž=[›¶3fHJ›î±4ÿï¸ãŽ^¥§·1hÎÓÛãõô>jdddˆY³f áîî.æÏŸ/rrrz}nÍûuæÌqÝu× .×Å”´J¹7Oœ8!¦OŸ.† &ÜÜÜÄÌ™3ŧŸ~jôø£GÖÆÙm\UUUÂÃãÛ}¢ÎŸ§Ë—/ =~žÆÇÇ‹¥K—¨ÏSkæoÄÎ;-vüM›6YìØÖÐÜÜ, ´÷Þ¶mÛÄÚµkÅÒ¥KE||¼ˆŒŒ*•JØÛÛëÝŽŽŽÂÏÏO/O_µj•X·nÞ}ØÚÚ*w­ªóçæùóçÅC=$F%…¯¯¯¸õÖ[Å÷ßoò±4…ƒƒƒö¹”Ïe)qô”ÏêæMnnn"&&FdffvÉO¤ä9Rò®Þ¾w=ßXþgh»f›”òEoöííë…~=Œ‘R&éë58yò¤¸üòË…³³³ˆŠŠ§OŸâ¯ý«pqqS¦L999‹­§{ÚRe¨ÁZNé‰n9fÛ¶mbݺuze™ððp1|øð.×D©TŠððp-ÄòåËõò’ŒŒ Q[[+wòä3z´Ï='wÔOìܹSò÷ý¾²ty=>>^ÄÇÇ[ìøºL-¯«ÕjñÄO…B!î¹çƒßÙûƒîò¹îò¯³gÏê½æôéÓ¢¶¶VoÛÙ³gM>§½¯Ëê|ÌåË—›|Lcå»Îº;Þ—_~i°«¶¶V¸ººŠ‚‚í6[[Û.õ`†H-OIIg_ËXR뮈œœ1þ|áîî.\\\ÄìÙ³ÅÉ“'Mz?uI©çê˽ijút™rz:þ+¯¼"ÞyçƒñmÞ¼Y¬_¿Þhür«®®‰‰‰B¡Pˆ•+W µZ-wHVÕÐÐ DzzºHIIÑkçˆÂÏÏOØØØèÝ“NNNB¥R‰ÈÈHmýܪU«Ä¦M›DJJŠHOOCîýdÝëFX7º‘¡X7ÒÔÔ¤——«gïÜÎãääd°ž]“—hêÙÛÚÚäN¢É¤~ý!ˆL•““#6mÚ$bcc…ƒƒƒ°µµ‘‘‘bíÚµâÔ©SV‹ÃÕÕU455õ¸Ÿ)e!¤õ²¹ÊC})ošSnnn¿¸îDDDÉäɓŌ3äcаfû‘µõ‡þ¦z@Hcc£ÈÊÊÒë ªÛâîîn´3n…Õþýû‡d§Pž*ž=*&L˜ }~úôi(¶nÝ*JJJDII‰xçwD@@€8}útŸÏ§»_LLŒØ·oŸ¨­­¹¹¹">>^ÜvÛm÷Ý´i“¨¯¯EEEbÉ’%âÖ[oÕþ¾­­MŒ=ZTVVê½®°°PxzzŠºº:mÚ|}}Å[o½%JKKEyy¹Ø¾}»P©T"//¯Oi“šsÇ`Êñzz…âÌ™3"((H{ýËÊÊÄöíÛÅ•W^ÙçsOŸ>]|óÍ7¢¡¡AìÝ»·Ç{ÅÐù¤Ü›¿ÿþ»¸à‚ 
ÄþýûE}}½ÈÌÌ“'O6ØÀ£‘žž.BCCÅÏ?ÿÜmLBñÈ#ˆ9sæô¸ß`TYY)ÒÓÓERR’¶£¥nc‚nô£££^£´nËôôtQUU%[:úCm.ýu@;3È£óçܼyóÄêÕ«Eqq±hjjÇ‘‘‘’òjcû¤¦¦jË R?—¥Æaìœó¦òòr±cÇqá…m$0–çHÍ»úúÞu—ç;†±´t._\wÝuFËK¦ìÛÛ×›z=Œ¥µ§2I_¯ÁÂ… ÅÉ“'ÅùóçÅý÷ß/.¿ür±`Áí¶{ï½WÜpà ÍX¬–,C årŠ "++K:tH[žémþ´víZ±mÛ6±ÿ~‘‘‘¡×QwÐpqâ½÷䎂ú éSËë+W®âý÷ß·PDæÓS‰±üë矓&MÒÛ_­V‹1cƈ'Nô꜖¨Ë2å˜ÆÊV¦/==]¸»»ëu¸{ÿý÷E@@€xíµ×ôŽ7nܸÿN¤”YLMgoÊX¦–}®¼òJ±}ûvQVV¦'88Xo@†Ô¸M©çêí½ijúzs¤öìÙâÇ4ãñãÇEll¬Ñ4ôï¾û®°··ÿøÇ?äÅ,4ƒ–uËž-ëvl6ÔΡ;hY·ìY]]-wòú-ÖüyÖ°n„u#dÈP™TƒBh¨ÉÈÈk×®‘‘‘B¡PˆaÆ‰ØØX±mÛ6ÙÚd-Z$zè¡.}8 ‘Z6Ò@Ê>æ,õ%¿¶”ºº:‘’’"´ŸéááábùòåâСClg%""úÃþýûqðàA¹C8 „³þÐß´ßiiiÑvíܱXÓ G¡Pt連[ɤÛ9” ÆõôE³®®N¸¸¸hŸß|óÍbݺu]ö{å•WÄ-·ÜÒçóéî·k×.½mÙÙÙÂßß¿Ç×VVV OOO½mO<ñ„xã7ô¶½ð âž{îÑ>¿ùæ›Å‹/¾Øåx[·nK—.•sw¿“’sÇЗãzo¹åƒ×ÿ½÷ÞëÓ¹îg­4ÄÐù¤Ü›‹-|ðÞ>¿ýö›Ñ†ˆÌÌLÚí,999âƒ>“&M^^^âÌ™3&¥e¨hjjÒëd©;UDDD—FÝÏöÎù222´ƒ¹Ì­?dÐæbí!ìÌпuþœsuuzÛ²³³Mêô V«Emm­øñÇÅêÕ«…§§§øüóÏ…Ò?—¥Æa,.cy“f50C±Ës¤æ]}}ïºËóLíôй|ñÛo¿,/™ºoo_oêõÂP™¤¯×૯¾Ò>/((è²-//Ox{{[46c±š» ÅrŠùIY³ó Vƒjfͪ*!!>ûLîH¨Ÿà€Þ1¥¼þå—_ …B!Þ ±zª#é.ÿš8q¢^û½{÷Šk¯½¶×ç´D]–)Ç”2 DÊñÔjµðËö¥ IDATññGŽÑþ~Ú´ibÿþý⪫®Òn;}ú´ðññ鱇”2‹©éìMËÔ²¡¿W^yE¯#ªÔ¸M©ç¢w÷¦©éÓ%õI9þ˜1cDYY™ÁKKKEXXX·éè/¶lÙ" …Þ}ÕŸp¥Óþu#‡u#=ïÛÛ׳n„u#CÁ@ŸTƒBh°kkk‡Ë—/aaa€9r¤HHH)))’Væ°´úúzñÐC OOO1kÖ,ñæ›oŠüü|ƒûJ-Hé es–‡ú’_[ƒæ^Yºt© DHHˆHLL)))¢¹¹YˆduÍ5׈)S¦ÈÆ À!4˜õ‡þ¦Š?~Ù+ÇŽDDD˜üÚªª*dgg£°°EEE]çää ½½`oo///øûûC¥RÁÏÏOûXóÜÏÏ …¢·IÒ º» êêêàë닺º:€¯¯/¾ÿþ{„„„èíwöìY\uÕU(**êÓùt÷«¬¬„R©Ônkoo‡½½=Ôjµ¤×ëžçôéÓX´hÒÓÓµÛÂÂÂðá‡âÒK/í6m%%%˜:u*Nž<Ùë´IM¹c°ÖñŠ‹‹áçç×ës+ Ô××cذaÝÆÓ›Ø:ß›¾¾¾8~ü8üýý{<~vv6¢££±eËL:µÛ}CBB°`Á<öØcðõõ•œÒ×ØØ¨Í å999¨¯¯×î¯T*»ä ºCBB`kkkR }ÉßL¥P(°sçN,\¸Ð"Çß¼y3ûtŒ––”——£ªª EEEÚkÒùg^^Z[[µ¯stt„§§'üýýµ×ÄÏÏJ¥Ro[pp0ìììúšT’ óçfLL °råJDGG›ttË]ÎÎÎFTTüq„……þ¹,5cù¬±óTTTÀËË«ËkºËs¤æ]}}ïºËóŒ¥ÓÐvC勿æf8;;w)/™ºooÏeêõÊÜ÷oMM ÜÜÜjµ¶¶¶]¶ÙÙÙõªÜi©{º·e(–SäUUU¥—gÊO P]]­÷:¥R©—ÊOCBBàêê*OÂ~ù¸è" 3—'êW’’’pã7öúsÞ–.¯kÊæIII9¾.SÊë111°µµÅ¾}û,•yôTGÒ]þµ~ýzüþûïØ¸q#`Μ9¸ï¾û0gΜ^ÓuY¦ÓXÙª7u·ÝvÆŽ‹ÿû¿ÿCAAæÏŸ£GbÚ´iزe F…W_}?ÿü3ÞÿýnÓ%¥Ìbj:{SÆ2µìS\\ Ÿ.ñDFF¢°°Ð¤¸M©çzwošš>SË•Rïé鉒’ØÛÛw9FKK üýýQ^^n4ýÉŒ3 P(ðé§ŸZíœ=Ö‹¢¤¤Dïþvrrê¶<§yÌö ëaÝÈŸÇaÝHÏû²n„u#ÔwÍÍͨ¨¨è¶^ÄPû‚““S—¶CùhoÚ€éßŬÙ^DÔW 
8pàRSS±{÷n”””@¥R!66ñññˆŒŒì—eÎêêjìÛ·{÷îž}ûƒ7bøðáÚ}¤– ¤ôº¹ÊC}ɯ噙‰ääd¤¦¦âرcP*•ˆŽŽFll,æÏŸwww¹C$""²ª¯¿þ×\s ¾þúkLžrrr´ÏóòòP\\¬½ D`` fÍš…+VôxŽþA›Ko„lذ)))(**BIII—!žžžðõõ…¯¯/üýýáãダ€x{{# >>>ð÷÷׫¨¥þ¡ógUuu5Ö¬YƒÝ»w£¬¬ —\r ,X€ûî»Ï`'¡îŽeˆÔÏe©q;§)ySO±KÍ»,ùÞ™ÚéÁZûJ}½©×£3©es_)ÛÌ›±÷ÃÜe(–S†êêjm'‚‚”––¢  %%%(,,Dqq1Š‹‹QYY©÷:///øøøÀÏÏsçÎŃ>h€?þX°¨¯œ­sNê×8 ¤w¤–×…P*•xõÕWñ·¿ýÍâq™CoêH4ÊÊÊð—¿ü¹¹¹(..Fll,233{¬4µ¼Ø—º,SŽ)¥œ!õx;wîĦM›ðÅ_ॗ^‚~øa¼óÎ;(//ÇO<©S§âÞ{ïíñ{¦”2‹¹Óih›9Ê>†ê~¤Ö™RvíͽٗôI¹FRooo––ƒ±ªÕj8::öø·Ð_lÙ²>ú(ªªª,ÒnÐÔÔ„{î¹YYY(..Faa¡Þu²µµ…^½ˆ±ú'''³ÇG}ú‘žcgÝHß_ϺÖéÔj5JKKõêEòóóQZZª­)**BQQµ¯³³³ÓÖ‹øûûcåÊ•˜4iRçã€LΟ?„„¤¥¥¡µµQQQ˜;w.æÍ›‡Ñ£GËžIêëë±dɸººbÛ¶mÚí¦äÁ=õ²9ËC}ɯ喕•…Ý»w#%%‡†½½=¢££±}ûvxxxÈ‘ÕLž<nnnØ»w¯Ü¡ hBƒYèojcñ3Ó€·iÓ&ÌŸ?_ûÜËËËà̉EEEðòò²fh€„„¸¸¸àðáÃhjj‚Âh¥îí·ßŽ­[·¶nÝŠ»îºKï÷^^^¨¬¬ÔC÷O1ÌÅÜ1H=žÔ÷ÑËË ÅÅÅ]¶WTTX<-=‘zoŽ1BòŒ‡«W¯Æ'Ÿ|‚;ï¼ßÿ½Ùb%"ꆎW_}YYYÈÎÎÆý÷ß?ü‹-2Ëñ¥~.÷5cy“¡mRŽ%%ï²ä{§P(ÐÜܬ·­sðþ¬¯×Cj™ÄÒ÷¯œ±õ‡2)QΜ9„ÈʆJ'¶‘#G"22~ø!Þzë-,]º´O¾-Q—eîcJ=^LL ~øá466bÇŽÚòÅ‚ ðÑG¡ªª ß}÷bbbz<§”2‹5êM-û*aäÈ‘&ÇmJ=л{³/e;©×HÊñÝÜÜŒvhjkkã¤4d±nÄð±X7Ò7¬aÝõŽ‹‹ ^{í5ìÚµKo»Ô²”~Rö1gyHŽüšˆˆˆÌëÉ'ŸÄ¾}û´¾‰ˆú#«­E¸bÅ ½YÙ«ªª­]Vó8##)))ÈÉÉÑÎ*coo///øûûC¥RigøP©TÚç\EÄ2Þzë-8p@»:DGGã£>Â#<¢·ïÇŒèèhk‡ˆo¿ý~ø¡Þ Æ*ƒããã±fͬ\¹Ÿ~ú)Þxã ½ßϘ1_}õ®»î:½í‡²eËpüøñnc1Ç=Ø×z{<©ïcLL víÚ…eË–ém?pà€ÅÓÒ©÷æ”)S––†%K–h·ýôÓOX´h‘Þ2ã´1ÿú׿pýõ×㫯¾Âرc»œ{¨tʱ†ÆÆFm¾`(ŸÈÉÉA}}½v¥R©Í .¹ä\ýõzùDo— Ê|ðA½ÙÅ[ZZP^^nt)÷ï¾ûÎà’Žðôô4º”»f[ppp¿_y°R(ÈËËC`` ¼¼¼pã7búôé 5Ëñ¥~.KÃX>k,oúì³ÏLŽYjÞeÉ÷Î××¹¹¹zùÍ×_ÝçãZK_¯‡Ô2‰¥ï_kÄfìž6wŠåyUUUéå†òÓ‚‚TWWë½N©TjóÊ€€Lš4©K~WWWy–• °‰2…BË/¿III¸ãŽ;äG’¾Ö‘Üzë­xî¹çP[[‹'Nô霖¨Ë2÷1¥O©Tâ’K.Á›o¾©]¡øs%ÇW_}—]v™¤™:¥”Y¬QhjÙ'--­Ë¬Gü±Þ ©q›RÏ¥aê½Ù—²”k$õø#FŒ@uuµÁ<ÕÕÕ1bDié/vî܉+¯¼ÒbíNNNxÿý÷õ¶566­),,ĉ'´«½©Õj½cu®1T?Âö ù°n¤+ÖôëFX7BjnnFEEE·õ"†ÚœœœôòÊñãÇ#::ºK>Ê6 "ÀÃÃ{öìACC8€ÔÔTüóŸÿÄßÿþw¨T*ÄÆÆ">>‘‘‘ýªÌ©P(pöìÙ.yƒ]—:O©e)ý¤ìcÎòùu_dff"99©©©8vì”J%¢££ñî»ïbþüùpww—;D"""«›5k"""ðâ‹/ru "ê¿D¤§§‹ôôô¾¨––QPP ÒÓÓERR’X·nX¾|¹ˆÂÏÏO( @NNNB¥R‰èèh‘ –/_.6mÚ$RRRDzzº¨®®¶HœƒîmÐØØ(Ξ=+vìØ!¦M›&Æ/N:¥·ÿ©S§D@@€xçwDII‰(--[¶lâôéÓ&¯7ûuÞ>cÆ q×]w‰³gÏŠææfqúôi±dÉ£¯¿ýöÛELLŒ¸ãŽ;ºüîìÙ³b„ "99Y”——‹šš±gÏ 
víÚÕcÌâÛo¿---â³Ï>&§§¯1ôöxRßǬ¬,$¶nÝ*JKKEEE…عs§¸øâ‹û”–Þ|u~Ô{ó矣Gû÷ïuuuâ§Ÿ~—^z©Ø¸qc·ÇÿðÃŘ1cDqq±Þö«®ºJDFFšÿPÔÔÔ$²²²Ä¡C‡DRR’X»v­Xºt©ö³}øðáÚÏõΟ퉉‰bÕªUbÓ¦Mbÿþý"##CÔÕÕY$NKæo;wî´Øñ7mÚd±cRYY)222´×X“'$$ˆèèh.<<<ô®3¡T*Exx¸^>¾nÝ:±mÛ6íõf^Þw?׈3fˆŒŒ ÑÔÔ$Š‹‹ÅO>ÞbÇ×eJyý«¯¾ …Blٲł™OoêHt577 OOOñä“Oöùœ–¨Ë2å˜Rʦï¹çž...âßÿþ·ÞöÿûßÂÞÞ^<÷Üs=¦ICOes§ÓÐ6SË>_|±Ø¹s§(//ׯ,Ξ=krܦÔsi˜zoö¥l'åI=þìÙ³Å?þh0ÆãÇ‹ØØXIé‘Û¦M›„B¡_ýµÜ¡ÔÜÜ, DFF†Ø¿¿är¢£££ÁrâºuëDRR’¶œØÚÚ*w<Öô;ëFúþzÖ°nd(hhhÐkïY·nXµj•HLL±±±Úö|ƒí>‘‘‘">>^,]ºT¬]»V¯-   ÀâñKý.fÍö""sjkk‡Ë—/aaa€9r¤HHH)))¢©©Iî%¾ýö[QSS#êëëEzzº˜6mšX³fÞ¾RËRúHÙǜ塾ä×Ö ¹W–.]*"EJJŠhnn–;D""¢~!))IØØØˆÌÌL¹C°¬ÙþFdmý¡¿i¿"Ecc£ÈÊÊÒkXÑ­drww7ÚÙ´sÇâ¡Ú˜Ò¹C®£££ð÷÷sæÌï¾û®ÑŠ€ŸþYÌœ9S¸¸¸1sæLñóÏ?›t¾Þìgh{II‰HHHÞÞÞÂÁÁA\xá…ÚŽ(†Îóõ×_ âûï¿7xîS§N‰ ˆáÇ 1iÒ$ñÑGõ˜6!:2þQ£F 1f̱{÷n“ÓÓ—úrMc·nÅ¿£££^¥¿f@RR’HOOUUU²¥£?dÐæbí!R544èu¤Ý´iS¯‹t;Ò¦§§‹‚‚¡V«åNb¿cès3--MÌŸ?_Œ1B888ˆQ£F‰‡~XÔÔÔ˜|,c¤|.KÃX>+„~Þäææ&bccÅ™3g„ÑØÅ/%ï2Ç{gìüeeebñâÅbäÈ‘ÂÅÅEÄÅʼnÜÜÜ.¯1¥|Ñ×}M-ËH½†H-“˜óþ•ºÍܱuwO›« ÅrŠizg« âùç厂úéSËë«V­öööbPˆ”:’îî™ÖÖV* û|N!ÌS—Õ›:SÊVRc<~ü¸puuõõõzÛëêêİaÃŒvúïLj™ÅœéìKÝ•æu™™™"&&F¸ºº 1kÖ,qòäÉ.é“ú~šRÏ%DïîMSËvR¯‘”ã¿òÊ+F?;¶lÙ"Ö¯_/9=rÙ¼y³°³³ëÒ9l â¤ÖǺ‘®±³n„u#¦ncÝÈà§ÉŸ4íï†ò§Î“{u—?i6fddˆÚÚZ¹“§Å!4Ôdddˆµk׊ÈÈH¡P(İaÃDll¬Ø¶m›lm²GŽwÝu—3fŒpppÎÎÎbâĉbÆ ]Úù¤– „è¹€Ô}ÌUêm~mIuuu"%%E$$$h?ÓÃÃÃÅòåËÅ¡C‡ØÎJDDd@{{»·ß~»Ü¡ XBƒYèoªøã—½rìØ1@DDDoaqUUUÚ¥f³³³õggg#77mmmÚý•J%T*•vév•J¥÷<446662¦ˆˆH?O;¦û<íü™:>O­™¿) ìܹ .´Èñ7oÞŒÄÄD‹ÛZZZP^^ntycËÉ;::ÂÓÓS»||çeä5Û‚ƒƒagg'c É’2331gΜ;wNîP¼$/ÝrLaa¡Á|¥  ÕÕÕz¯S*•zùˆ¡|%$$®®®2¥L&--À°aÀŽ@|¼ÜÑP?‘””„o¼}¨f’ÌÒåuMÙÜË~›Z^B`ÕªUxöÙgqçw⥗^ÂðáÃ-¡|’’’ššŠ>ø@îPˆô Ô{óìÙ³¸ýöÛñÕW_uùÝ”)SðÁ 88ØúIpþüy<úè£x÷Ýwñü«V­‚B¡;,«illì±^¤°°%%%P«ÕÚ×999õX/âçç??¿!õ~5ü.Þ¿ðz\š››QQQÑm½HQQQ—v'''ƒyGçŸ>>>°µµ•1…¦“ú]l ô‡ 2Unn.>ýôSìÙ³Ÿþ9ÚÛÛqÅW ..×_=ÆŽ+wˆdyyyØ·o¯;Q¼÷Þ{¸ûî»qêÔ)„††Ê΀cÍö7"këýM}H¥R ¥R‰ñãÇݧªªÊà`‘ÌÌL¤¥¥áܹsÚ†ìܬR©0zôhxxxX+yDDfÑÔÔ„ÂÂÂ.Ÿ…šÇ§NBmm­vÝΑ*• ÑÑÑzÏÙÉžÌÅÁÁþþþð÷÷ï6/::Hë‘Ç£°°çÏŸ×{±Î¾º ]AAApww·dRÿŸ½;«²Nÿ8þFÙ•EÙ„$4S´eZŒ\ ÌJÊQÇÊ S3—ÜrK+s©ÜÊܪýi3#5¥MŠ-fÚ2Z®9š îâ"ˆ²ýþ8ÃPÜ/Ëçu]\œs8Ëçœó<ßû¾åÙØØ0sæLzõê…³³3¿ýö/¼ð 0­ZÒÏCÊCi-f 
¡mÛ¶Ub1C¹Ù·òòà¶ÛL'©vlllxõÕWiÖ¬ýû÷gåÊ•Lž<™=zTØ‚ûeccÃ?þÈÔ©Syÿý÷MDZªìÏÍàà`Z´hÁ’%KèÙ³§õò%K–вeË Y ’——Ç’%K=z4yyy|üñÇ<öØc¦c•;'''œœœ¨_¿þU&]«©ÆÆÕT£ŠÓ{ñŠE?)%í/éÿÀµŠ###¯ø@DªžÀÀ@âãã‰'33“¯¾úŠ„„&OžÌ¨Q£'66–˜˜Ú¶m«âáJlûöí$%%‘˜˜Èúõëqrr¢]»v,\¸Î;km“ˆˆÈ êÙ³'¯¾ú*o¿ý6ï¼óŽé8""Åhï=–… ‘‘‘W<˜ráÂ>\â"éäädöìÙSlqiáN´+MiР...åµy"RÍ ¾ÒdÂÓ….ýA||¼õïÙí·ßŽ›››Á-)™“““õÿíÕ\«³æŽ;8rä©©©äååYo§Îš[RR3fÌ`ìØ±Ô¨QƒÐÐPÈSO=e:Zµ¤Ÿ‡ÜŠë]ÌpìØ±b“´˜¡íØ66 ®q"Æ<þøãÜwß}Œ5ŠÞ½{3eÊÆŽK×®]±³³3ï–EEE1`Àš7on:ŠH1•ý¹9mÚ4úõëW¬ díÚµ¼÷Þ{S].''‡eË–1iÒ$vïÞMïÞ½™œþýûóÌ3ÏTÚE‹ýD*’ªðÜ´µµeáÂ…Å.[°`¡4—;räùË_xï½÷HMMåÉ'Ÿdùò優µÔ©©FÕ¤÷â‹~r©K'^iÿˆ&Šˆ)5kÖ$**Ѝ¨(fÍšÅöíÛIHH ))‰ àééItt4111téÒEEÀDVVkÖ¬!))‰åË—süøqBBBˆ‰‰áƒ>Д‘RÖ§OÞxã fÏžÍäÉ“MDZÒ*ÄRr=P®´h{ïÞ½$''[´mggGݺu¯:iDKDª¾¢Åf%ýýØ¿?YYYÖë-6+©àCÅf"×O5ED®M‹ª©­[¡iSÓ)Dä¿5jÄG}Ä”)Sx÷Ýwyûí·?~<;v¤W¯^<ôÐC*š‘ ëܹs|þùç,^¼˜/¿üúôéÀ0¯Ú»Þ¦pyxIM5:ÄÅ‹­·QS © 4éTDªªˆˆ"""˜0aû÷ïgùòå$%%ѧOúöíKTT111tíÚ???Óq«•“'O²råJ’’’X¹r%çÏŸ§yóæ<ÿüótëÖÆ›Ž(""Re9::òâ‹/2yòdFŒ§§§éH""€ BÊ•§§'‘‘‘WºIùùùüòË/Ö";vP§Nzè! D§N4VDDÄZµj1`ÀfÏžÍK/½„“““éH""*©l¯y°¤è"¯K¢'''sðàArrr¬×÷ôô,uÁŸ< IDAT±X¤ð|ƒ t`Dä—þž]ú»véÁ‚Âß³k§lýž‰ÈÕ”FgÍÂ⑚—ÞÿµGøûû«M¤Ðb)W¿ý¹¹Ð´©é$"rêÖ­ËÓO?ÍÓO?]l‘Ƽyó7n®®®´nÝš¶mÛÅ=÷Ü£â>¹eyyyüú믬[·Žï¿ÿž5kÖpúôi¼¼¼èر##FŒà¡‡ÂÅÅÅtT©ÀÔTCDn„&Šˆ˜áèèHtt4ÑÑÑ̘1£XÃìÙ³­ ±±±*`¸E÷Ý$$$pôèQkͬY³T`#""R 8·Þz‹¥K—Ò§OÓqDDTR]ÏÒÂÉ—.hß¾};ÉÉÉìß¿Ÿüü|ìíí©S§N‰Å"…ç===ËkóDÊ\á$ž’~GŽ9ÂÁƒÉÈȰ^ÿÒI<ÑÑÑÅÎë ¡ˆ”%uÖ©ºÊr1Cýúõõ^nÎÖ­àà¡¡¦“ˆÈ ºt‘Æ–-[X»v-k×®eîܹLœ8WWWÚ´iÃÿøGî¹çš7oŽ›››éè"RÁedd°iÓ&¾ûî;Ö®]ËúõëÉÌÌÄÇLJ?þñL˜0{ï½—¦M›bccc:®TAjª!RuiÒ©ˆHåQ£F "##‰ŒŒd„ ìÝ»×ZØÐ­[7xàˆ¥sçÎøúúšŽ\!œ>}š5kÖ˜˜ÈgŸ}FFFáááÄÇÇ{Íâh1ÃÛÛ›?ýéOÌœ9“gŸ}Vû=EÄ8­P®¦<==­oÆKrñâE:T⤑äädöìÙCZZšõú…‹Î®4i$((HݤB¸xñ"'Ož¼âdÂÓ….}nGFF{žß~ûíZ#"•BE鬀]Yo®H…¦Å R©mÙƒþ–‹Tj5jÔàÎ;ïäÎ;ïäÅ_`ïÞ½Önþ|ðãÆ°¾.ühÓ¦ uêÔ1_D ÊÈÈ`óæÍlܸÑú±sçNòóó©W¯QQQLš4‰¨¨(Z´h¡¡R¡\oSììlNŸ>}KM5®´_DM5D4éTD¤º aРA 4ˆ“'O²råJ’’’:t(ýúõ£yóæÄÄÄЭ[77nl:n¹Ú¿?Ë—/'))‰o¿ý–š5kÅk¯½F×®]ñóó3QDDD®ÃСC¹ãŽ;X½z5íÛ·7GDª9„H‰ìíí¯y`¤°›VIS6nÜÈ8wîœõú—NQ¸´x$((H;hå–]iúMáù””ëAºK§ß\:Ù£ð´ˆHu£Îš"7N‹¤ÚøùghÙÒt )…ûjzõê@JJ 7ndÓ¦MlÚ´‰¹sç’ššŠ 6$22’-ZдiS ÔÂo‘*¤  €””vîÜÉ–-[Ø´i7ndÏž=àããCdd$=ö-Z´ 
22’ÀÀ@Ó±EJEaaÇ­6Õ(œÈ®¦R]\m_aÑý#št*"RýÔ­[—^½zÑ«W/²²²X³f IIIÌ›7‰'BLL qqq´mÛ¶Jî_ؾ}; $%%±qãF<==‰ŽŽæý÷ß§K—.ÔªUËtD¹AMš4¡]»v̘1C!"bœ Bä¦]O7­¢CŠ.Ìß»w/ÉÉÉÅçÛÙÙQ·nÝ«N©W¯^•|ó/×§hRI“=öïßOVV–õúžžžÖçNDD±±±Åž[*B¹5ê¬)Õ3ˆ‘Ÿ¿üݺ™N""å ((ˆ   {ì1ëe‡*V$2sæLŽ9€««+7&<<œ°°0ˆˆˆ 88Xï½E*°¼¼<öîÝËŽ;øí·ßرc;vì`çÎdffàççG‹-èÑ£‡µøCkE,ÔTCªƒë™tzðàA222¬·)lÈUô9[Ò¤S>‰ˆT?ÎÎÎÄÆÆË{ï½Ç† HJJâÓO?eöìÙxyyѱcGâââhß¾=¦#ß”¼¼<6lØÀÇ ä,Y‚ӧñ ¢C‡¼òÊ+tèÐAÓ¾EDDª€ÁƒÓ¹sgvìØAxx¸é8"R© DÊÔµ†äääpâĉ'|ÿý÷—-®sppÀÏÏFBCCÕ9¡’ºpᇾâdßÿôôtëõ Y> ;i>‚ƒƒqvv6¸E""R¨4;k®[·î²)dê¬)7C‹DnÂþgÏB«V¦“ˆˆ!þþþøûûóÈ#X/;sæŒuùo¿ýÆöíÛùöÛoIII,¯Õ7nL£F á¶Ûn³¾—P±ˆH9ÈËËãàÁƒìÝ»—={öX÷»íܹ“ÿüç?\¸p ã¾ûîãùçŸ'<<œððp<<}šX÷·-þHII!''777ëï[§Nx饗§qãÆ¸ººÞ QS ¹Ue9éÔÏϯÂ/Ä‘Ê-""‚ˆˆFŽÉø×¿þEbb"Ï=÷½{÷¦uëÖÄÆÆòØcj:.@±œ«V­"//Ö­[óòË/ÿ/ç8 :At4LŸM›šŽ."""¥ÀÆÆ†2lØ0&MšDݺuMG‘jJ!Rá9::^³sVÑ—$''sðàAëAO°,V(©X¤ð|ƒ Ô%ë\úý¿ôgpé¢ËÂïI“=êׯOpp0666·HDD*ºÒê¬Y8‘L5+¶ÒZ̢Š"·â矡Y3KQˆˆÈ5¸ººÒ²eKZ¶lyÙ׊6(üؽ{7_~ù%)))Ö×e%ý/¿ôt½zõ´Aª¼Â××îs»ÒdÝ¢ûÞüñbEWÚï&Ru¨©Fõ¢I§""RÝO|||±É“'OfÔ¨Q„‡‡KLL mÛ¶-×÷9E'™¬_¿Þ:ÉdáÂ…%O2 „Å‹¡2Z´€Þ½áµ×ÀÛ»Ür‹ˆˆHÙxúé§?~<óçÏg̘1¦ãˆH5¥‚©®çÀGábƒK ¶oßNrr2û÷ï'??øßNò«M©.]* '´\i²Ç¥Æ ~¯¢££‹ ÄÖVzDD¤|¨³fŦŠ"•ÌÏ?C«V¦SˆHàééIddd‰¯Ï.\¸À¾}û8pà‡¶~>|ø0Ÿþ9‡"--Íz}'''ñõõ¥~ýúxyyáíí]ìt½zõðòòÂÑѱ<7S䪲³³9qâGŽáĉ¤¦¦rôèQRSS­—=z”ƒrþüyëí<<<ð÷÷'00???Zµje=@pp0*Þ‘"J«©ÆÆIJJ*V¼ Å 8Cœ°/r¼Ô_‹ˆˆ\­Ê–jãj‹ .^¼ÈÉ“'Kœr‘œœÌÞ½{9sæŒõú…9®4i$((WW×òÚ¼›rµm.zºÐ¥ÛYé¶YDDäJÔY³th1ƒH•“[¶@ß¾¦“ˆHçàà@ãÆiܸñ¯“™™É8tèµhäèÑ£;vŒÿûßÖ…õ™™™Ånçîîn-ñññÁ××—Úµk[?êÔ©Sì|íÚµõºC®K~~>§OŸ¶~œ:uªØùÂ×Ç©©©ÖççÙ³g‹Ý‡‹‹ õêÕÃÇÇ///ÂÃÃi×®AAAøùùáççG`` ...†¶RDªº[iª‘½u+Í?ÿœ;wÒïÄ rr.kªQ´WUoªq³“N/Ýw¤I§"""—«Y³&QQQDEE1kÖ,¶oßNBB‰«Y°jžéžDGGC—.]¨U«ÖM=NVVkÖ¬!))‰åË—süøqBBBˆ‰‰áƒ>¸ù©$66=ï¾ ¯¿|“&Y.‘JiàÀLŸ>e˖ѳgOÓqD¤RAˆÈÙÛÛ_ó`GáNü’¦elܸñšÓ2.-)ëiWšŠRxþjSQ.ìQxZDD¤º+ÏΚW+)ïÂ-f©æ~ùΟ‡»ï6DD »êõ²²²8~ü8ÇŽãĉ;vŒãÇ“ššÊ±cÇØ¼ys±û.\¸ì><==/+©S§µjÕÂÍÍ OOOÜÜÜŠ}xxxX¿®I •Ã… ÈÈÈàìÙ³¤¥¥‘‘‘QìãÌ™3dddžž~Y¡Ç©S§Š5‘)T8M°ðùãããÃwÞ‰——¾¾¾ÖÂÂÓÎÎζ\DäæX›j¸¸@B|ø!À¼y,èÝþ{Ü£,›j˜Ø—pµé²…ûG4éTDD¤|EDDãGöåïãåy/³êÓUôéÓ‡¾}ûELL ]»vÅÏÏïª÷uòäIV®\IRR+W®äüùó4oޜ矞nݺ]µyÅ sq‘#¡G=ž|æÎ…éÓáÎ;KïqDDD¤\øùùѵkWf̘¡‚1B!"7àz€= P´cïÞ½Ôÿ䎜9ÃSÿ-ÂË’ŠE Ï׫W¯ÄÎE‹SJ*ú8xð 9·çÀyÿ{œÂƒ 
±±±Å³Aƒêz)""RŠn¥³fÑÏÛ·ogݺuÅ O›‚ý {êØÔ¹êâˆëYPp=‹\RRØ™™Iár-f©†¾ûêÖ…k,¾©Hœ &88øº®Ÿ™™yÙ„‡K'=œ:uŠƒröìYk¡À¹sçÈÉÉ)ñ>íííqssÃÝÝwwwk‘ˆ‡‡vvv¸ººâää„££#nnnØÚÚâé鉭­-nnn8::âääTìk€õë—ž®j222¬SåΞ=k-¤>sæ 999œ;wŽóçÏ“m½î™3gÈÍÍ%##ƒììlΟ?oý¥¥¥Y‹?ÒÓÓIOO'##ƒ‹/–øøvvv—ù6TiÒ¤‰µà£¤é2šä!"UÚþý0yòÿ AæÌ"… …*CSÒštZÒ~M:)Ç8Æd&3¬Æ0†÷ÎðþÃ9}ú4kÖ¬!11‘qãÆ1xð`ÂÃÉ‹‹#66ÖzÜfïÞ½$&&’À† pppà`úôétîÜ__ß² ïï‹ÃÀ0t(DFZŠD¦Mƒ²~l)UC† á®»îbíÚµÜsÏ=¦ãˆH5£‚‘RfíŒQü @DùcÆð§gŸåàÁƒ¤¤¤pèÐ!<ÈøöÛo9tè§N²ÞÌÙÙ™   üýý8tè)))deeY¯S§Nüýý ¤Q£FDGGãïïÏÖ»¶òVè[üðδ´mY.Û/"""7.qòäI§¦sPgŽðÈ'päÈŽ?NJJ ?ÿü3‡.Ö‰ÒÆÆooo|||¬Ý¯>líŽ]tš‡››~~~x{{ãççG«V­ðñ¡ß´iœzàÒßxêÖ­[6ß©¸Ö­ƒ¨((¡X]D¤ªpqqÁÅÅ…€€€¾maABÑiE?ÒÒÒ¬E$.\àÌ™3œ?žÔÔÔb …E …Å7ª°È,Å(…E —M ¨Q£®®®äååaoo_âý]O±IÑ¢+IOO·N©-”™™i-Ä(zúj6WãêêŠÝU j‚ƒƒqppÀÍÍZµjáááqÙ„—¢S_o8‡ˆH•v… 7êz›jäæærüøqŽ=ʱcÇ8zô(Gåøñã>|˜­[·’à›@Æî rûþƒÞÞÞøûûãååEZZ©©©×܇âííÍwܵ—¯¯/õë××d'‘ d#pÇ—xÉzYíÚµ‰‹‹#..ŽóçϳzõjV¬XÁܹs™8q"¡¡¡ìÞ½ooobcc1b>ø NNNå¿­ZÁÚµðñÇ–É! °a–Ó&òˆˆˆÈ kÕªwß}7óæÍSAˆˆ”;„ˆ”—¡~}jŒM£#AAADEE•xÕ¬¬¬ËŠE8@›6m $ ‚‚‚®xà¡€Ö²–A¶ƒXÇ:lÐâ-‘ʬnݺ,©»„c#9$™Ðá¡%^/++‹#GŽpìØ1Ž;f-9räÍ›7ÇÇLJúõëãëë{íÅ ø÷ì‰ÿÀp¢©‚ `ýz1Ât‘ ËÑÑGGG¼¼¼Jõ~ ‹EΞ=Knn.iii\¼x‘ÌÌL.\¸`mR8ãÒÓ…÷STvv6ßÿ=;vì {÷î%v/ÏÊÊâÂ… WÍX·nÝk.Š-œ‚RTa±Æ¥_wvvÆÁÁá²Ó...ÖÂlmm©U«V±û‘2RF… 7ÊÖÖ???kËKPÀmÜF<ñ 5¼XÑHáþ‘ÔÔTüýý/+ò¨W¯>>>Ø–ó6‰ˆˆÈ­ùX–± gJ~oêääDçÎéܹ3ùùùlذ+Vйsgþð‡?TŒ‰^66110{6¼ñ|ðLšþ³šõˆˆˆTñññôë×'N”úñ‘«Ñ^M‘ò°l$&Br2\GgAggg »¥‡µÁ†ù̧-XÄ"žæé[º?1ë4§y×y‰—¥äb°¼–hذ! 
6,îÞÝÒ•*>¶lÿv‘jbçNHM…?þÑt‘jÇÉÉ '''<==Ký¾ÓÓÓ fäÈ‘Œ?¾Ôï_DDª€¢… þþÆ A®×Ö°}<ÅSÔ©S‡:uê\s«ˆˆˆT^0˜Á´¥-óøuݦF´mÛ–¶mÛ–qº[àäd™ Ò«L˜`yýõî»0cTäÜ"""Ÿþô'† ÆâÅ‹y饗®}‘RRJÜEª¸³gaèPxöYh׮ܾ)MéG?†1ŒSœ*÷Ç‘Ò3†1ØbË(F•ÿƒÏ™éé0n\ù?¶ˆ˜µn8;ÃwšN"""¥húôéðâ‹/šŽ"""Í¡C0h4n «VYö ìÞmiQA‹A±ˆÖ´&Œ[k¶%"""•Ã"ñoþÍ»¼‹ UpzF½z0>üü3¸¸Xö<ñ„¥hWDDD*$'''ºwïÎܹs)((0GDª„ˆ”µ# 7¦M3áu^Ç{Æ£n"""•Õv¶ó>ï3•©Ô¢Vù¨WÞ~Û2¦ü»ïÊÿñEÄœuë uk°·7DDDJIZZ³gÏfذaxxx˜Ž#""ʼn0d„†Âòå0wn¥(8ËYþÉ?yЧLG‘rAcC<ñ4£™é8e«ysøúkËë³_~ðp5ÊÒœTDDD*œ¾}û²gϾùæÓQD¤QAˆHYúáX¸fÎOOc1jQ‹©Leóø‰ŸŒå‘›7„!4£æÏæB<ý4tè}úÀùóærˆHùúö[¸çÓ)DD¤½õÖ[Ô¬YSÓADDÄâÜ9˜:6„¥KaÂعžy¦Â‚ZÆ2òÉçIž4EDDDÊÁ$&qžó¼Ê«¦£”ŸØXر&O¶L ƒ /Ït2)¢iÓ¦üá`þüù¦£ˆH5¢‚‘²rñ"<û,´oݺ™NCOzr/÷2€ä“o:ŽˆˆˆÜ€ù˜d’™ÉLj˜~ ÿþû𠝼b6‡ˆ”߇”xàÓIDD¤”œ:uŠÙ³g3|øpÜÜÜLÇ“²²`Ö,¸í6Ë”ó!C`Ï9M§»!å¯t¡ ž˜kÎ%"""åc{˜ÉL&0ºÔ5§|ÙÙÁ A–×l]»Â€p×]–¦>"""RaôíÛ—O?ý”ÔÔTÓQD¤šPAˆHY™6Ͳpjî\ÓI°Á†wx‡Ílæ}Þ7GDDD®S6ÙŒ`=èAQ¦ã@ýú–®¡o¿ ë×›N#"e-9\\,ED¤JxóÍ7qpp ÿþ¦£ˆˆˆ)99–nÒ ˜1–I {öX&ƒTÂbÁÝìf=ëyš§MG‘r0”¡„B?ú™ŽbNíÚ–ÂÞmÛ,Çmî»Ï2AdÏÓÉDDDxâ‰'puuå¯ý«é("RM¨ D¤,ìßo¼ãÇCƒ¦ÓXEÁ@ò2/s‚¦ãˆˆˆÈu˜ÍlRIe SLGùŸçžƒèhË4´ìlÓiD¤,­Y÷Þ öö¦“ˆˆH)8yò$ï½÷£FÒt‘ê(? 
, ´,üýw˜2<£GÆÉÉÉt)KGŽ@ß¾pç°?$&¿þÍš™NVêŽr”/ø‚çxÎt)c£-¶¼ŒŽE\·† -Á_}§NAóæ–׉©©¦“‰ˆˆTqqqÔªU‹%K–˜Ž""Uœ BDJËÁƒ0y2Œ AA¦Ó\S(¡ a£ÍQT*""R‘lf3ãoLa öØ›ŽsuõëÃ;ïÀœ9–&"R9„•“cù|ö,,_n™äçg)‘Jå7ÞÀÓÓ“gŸ}Öt)+™™0u*4n +WZÞ§oÞlé]E½ÏûxàÁ#ø€±cÇj:ˆˆHU”“ X:@O cÆÀ®]5kšNWfòÉçC>äižÆÓqDDD¤ f0wr'=éi:JåU£ôêe™ ýâ‹0q"4m ¦“‰ˆˆTy=zô`óæÍlݺÕt©ÂT"R’“áŸÿ„Y³À¡òxpÆ™·y›Å,æ¾1GDDD€/ù’Õ¬f2“±ÁÆtœë7gäåÁ!¦“ˆÈͺÚ{™š5ÁÓþú×r‹#""¥ãõ×_ÇËˋ޽{›Ž"""¥-1""à… sgعFŽ„jP¸ŠUìg?½Ñÿ7‘ªlËø–o™Ãjh‰Ó­sq ,Ä­[ÓOB»vð믦“‰ˆˆTYQQQ³dÉÓQD¤ Ó»%‘[uñ" > =d:Í {ŒÇèD'^àrÈ1GDD¤ZË'ŸÑŒ&–XÚÑÎtœS·®¥#é¢EðÉ'¦ÓˆÈÍpv¾ò×òó\'q IDATáo³ü®‹ˆH¥qàÀ>üðCÆŽ‹C%jb"""×ðãpÏ=ðÈ#pçðÛo0>x{›NVn²{¹—0ÂLG‘2ržóŒd$½èÅÝÜm:NÕâï‹[^W^¼‘‘– "ÇŽ™N&""R娨ØÐ½{w–,YB^^žé8"RE© DäV½ó¤¤Àô馓ܴYÌb{x—wMG©Ö>â#6³™ÉL6åæÄÆÂ3Ï@¿~pü¸é4"r£\\J¾¼fM7ÎÒ)NDD*•×____žzê)ÓQDD¤48O7lh™ ’m:™ˆˆH•Ò³gOŽ9·ß~k:ŠˆTQ*¹§OÃoÀK/Aƒ¦ÓÜ´†4d8Ãy…W8ÌaÓqDDDª¥\r™ÈDþÌŸ‰ Âtœ›7s¦eÊ@ß¾¦“ˆÈ*© ÄΚ5ƒ±cË?ˆˆÜ’””-ZÄøñã±··7GDDnÅùó0q"„…Á¯¿ÂŠðÕWв¥édFü…¿à‚ ò¨é("""RFqˆ7y“1Œ¡>õMÇ©Úll .vì°4š1BC-D L§©7nLË–-Y²d‰é("RE© DäVŒoéÂ5|¸é$·ìe^Æ /†Sù·EDD¤2ZÄ"RHaãLG¹5µjÁ‡Z§hg†HåâæVü¼ 88À'ŸX CDD¤RyõÕW  W¯^¦£ˆˆÈ­HL„ðpxóM˱ˆ­[-:«©|òYÈBžâ)œq6GDDDÊÈp†ãƒƒl:Jõáä#GÂÎðÐCл·e2Ý÷ß›N&""R%ôèуO>ù„¬¬,ÓQD¤ RAˆÈÍúÏ`Á˜4ɲð±’s‰9Ìáoü¯øÊt‘j%‡&1‰gx†BLǹuíÚÁ Að °¿é4"r½.}_SP Vêiˆ""ÕÕž={X¼x1ãÇÇÖÖÖt¹;wBÇŽðÈ#ЪüöL˜`)Ú®ÆV²’}ì£ýLG‘2²žõüƒ0é8âh:NõS¯ÌŸ?ýd)ùãá‰'t¼GDDäuïÞ¬¬,MG‘*H!"7kØ0Ëxögž1¤Ôt¤#±ÄÒ~\à‚é8"""ÕÆû¼Ïaó2/›ŽRz¦L±,"ïÙòòL§‘ëQ«–e*X&!ÆÇC·nf3‰ˆÈMyõÕW ¦{÷ˆˆÈ:sÆÒd¡iS8yÖ­ƒeË Àt² asxiD#ÓQDDD¤ ä“Ï ÑŽv<Â#¦ãTo-ZÀ7ßÀòå°iDDÀ¨Q‘a:™ˆˆH¥äííMtt4K—.5EDª „ˆÜŒ¯¿†¤$ˈöš5M§)Uïò.‡9Ì,f™Ž"""R-\àoðñÄÓ€¦ã”XºÔrà7L§‘ëáâ5jXŠA‚ƒaÆ Ó‰DDä&ìÞ½›>úˆ &h:ˆˆHe’Ÿ‹C£FsæXº2·ic:Y…±‡=¬b`:Šˆˆˆ”‘ø€_ù•hßd…k™V÷Æ–É!Âj&""rzôèÁ_|ÁñãÇMG‘*F!"7*?ß2$&Ú·7¦ÔÈHF2‘‰¤b:ŽˆˆH•7Ÿùœä$£e:J鋈€É“áÕWaÃÓiDäZ\]-ñllà“OÀÙÙt"¹ 'N$$$„'Ÿ|Òt¹^ß|Í›CŸ>ð§?ÁΖ‰}5t¯¨9ÌÁæaÓQDDD¤ œå,ãOúÓ”¦¦ãHQvv–)v{ö@×®0`Üu¬]k:™ˆˆH¥òè£âèèHBB‚é("RŨEœÈúðCزÅÒ©«ŠÉH–²”a #½ø)+Ùd3iô§?~ø™ŽS6^|V¯†ž=á—_ V-Ó‰DŒÊÌÌ$++‹ŒŒ .\¸@VVçÏŸ';;›sçΑ““Czz:yyy¤¥¥‘››KFFyyyœ={ö²û+¼’çâÅ‹Öó®®®ØÙÙ]v=lllxô§Ÿèü½uk~]º”5jàî®®8::âä䄳³3¸¹¹akk‹‡‡5kÖÄÝÝýŠ#""åc×®]üýïgéÒ¥Ô¬bSmEDª¤C‡`ÈøøcKª?†ÐPÓ©*¤,²XÄ"F0‚šèœˆˆHU4 \ä"ão:Š\IíÚ0kôëÇý÷Z^ÇΜ 
·Ýf:ˆˆH…çââ£>ÊÒ¥Kyá…LÇ‘*D!"7âÜ9?žÞÒñºŠ²Çžwx‡ö´g%+yˆ‡LG©’Þå]ÒHc#LG);66–‚Ú;î€Áƒ-§E*¹3gÎpüøqNœ8Á©S§HKK#==ôôôb§ÓÓÓ9sæL±Ërrr®yÿîîîÔ¨QOOOjÖ¬I­ÿR-Þ(TôëEÕ®]ç">ÒÒÒ((((v¼¼<öíÛg=ÿ Y¼d%'[‹Prrr8wîÙÙÙœ?þšù]\\pwwÇÝÝëiwww<==‹]æááA:u¨W¯^^^899]óþEDäÊÆOãÆ‰‹‹3EDD®&7Þy^y|}aåJèÔÉtª m)KÉ$“gxÆt)¿ó;s˜ÃLfR‡:¦ãȵ4n ‰‰œ C‡BX˜¥HäÕWÁÝÝt:‘ íÉ'Ÿ$66–C‡áïïo:ŽˆT*¹S§Âùó–ƒ4U܃<Èc<Æ ÑŽv8âh:’ˆˆH•’I&oò&ˆ>¦ã”-ooøË_àᇡ}{èÖÍt"‘ˤ¦¦røðaŽ=ʉ'8qâÇŽ³ž.zyÑÉÎÎÎÖ"‡¢Å·Ýv—}ÍÅÅWWWpvv¶Nܨ(Ó5»Æ× §ddd››KZZùùù¤¥¥qîÜ¹Ë c OïÝ»·XLᔢ\]]ñññÁÇÇ///¼½½­§½¼¼¨W¯>>>âââRvß‘Jhûöí$$$°lÙ2jÔ¨a:Žˆˆ\ɦM–¦S¿þjY<7a8jÿûµÌcÝè†7Þ¦£ˆˆˆHx‘ %”çxÎt¹ÑÑ–×·~ãÆÁÒ¥–Ï€­–¤‰ˆˆ”$::777>ûì3M ‘R£Wß"×ëða˜>&N„ºuM§)3™I8á¼ÍÛŒaŒé8"""UÊlf“M6Ãf:JùèÔɲà¥_?hÓM'’j$==ƒràÀ:d=}ðàAëùììlëõ­Å^^^øøøÐ¬Y3¼½½­E ¾¾¾xyyQ·nÝ QÄQž\\\pqqÁÓÓó–ï+33ó²â›cÇŽ‘ššÊ‰'Ø»w/?üðƒõkyyyÖÛzzz@`` øûû@PPþþþøùùáààpËED*‹W^y…ððp}ôQÓQDD¤$ii–fSsæ@TlÞlé¤,×´Žulbó˜g:Šˆˆˆ”$’ø‚/øš¯±Õ2¦ÊÇÖâãá‰'`Ê9æÍƒ·Þ²4 ‘bèØ±£ BD¤Té”Èõzí5K!ÈÀ¦“”›ËX&2‘ît'˜`Ó‘DDDª„L2™Ît^äÅê5ú|útX·zö„¯¿†š5M'’*äСCìÞ½›ßÿ½Øç””222¬×sww·„††rÿý÷+ ¨_¿>nnn·¤z),.iРÁ5¯[PP`-¹´ gûöí|ùå—>|˜ .XoS¯^=‚‚‚ µ~4lØÐÐPÜÝÝËpËDDÊ×¶mÛøôÓOùä“O4DD¤"JL„þý!'ÇÒ=ùÏÓ©*9Ì¡-hE+ÓQDDD¤”]ä"ÃFWºr÷™Ž#·ÂÃÃRòì³0f ÄÄX&ˆÌ˜Mš˜N'""R¡téÒ…^½zqòäIêV“æä"R¶T"r=öüæÎ…jÖavCXÄ"3˜å,7GDD¤JXÈB²ÈâE^4¥|9:¢Eкµ¥3ÔÈ‘¦I%súôi¶mÛÆ®]».+ü8þ<nnnÖEÿ<ò 4°NŽ T±G%fccƒ··7ÞÞÞÜqÇW¼Þ±cǬÅ"`ïÞ½ìÞ½›~øýû÷“““€——×e…"5"<<{{ûòÚ,‘R1nÜ8î¼óNyäÓQDD¤¨={`ÀXµÊÒaÆ ¨SC”‚cãŸü“ùÌ7EDDDÊÀ,f‘B ÿâ_¦£Hi …eË,Á†…æÍ¡woxýuðò2NDD¤Bˆ‰‰¡fÍš¬\¹’^½z™Ž#"U€ BD®Ç¸qÕðŸ¯=öÌc÷qI$CŒéH"""•Z9Ìd&ñÄãE5Üñݼ9Lšdé ‘‘¦I”““î]»Ø±cÛ·ogãÆìرƒ}ûöQPP€ƒƒ~~~„‡‡Ó©S' @HH!!!c£N»Õš¯¯/¾¾¾´juy÷àÜÜ\k‘HÑ>úˆ]»v‘››‹­­-„‡‡IDDááá„……©ë¾ˆTH›6mbùòå¬X±BÿED*ŠœË”Ì àöÛáûïá0ªRšÇ<Üq§ÝLG‘R–J*“˜Äp†Ó€¦ãHi»ÿ~ظ–,±4KH°|<¸Ú5b¹”››÷ß?Ÿ}ö™ BD¤T¨ DäZ¶o‡>²|ØVÏ_™{¸‡'y’xv´ÃgÓ‘DDD*­%,áGÂÓQÌ:þõ/èÖÍr0 V-ӉĠ3gÎðã?òË/¿°yóf¶mÛÆþórss±··',,Œ&MšÐ·o_š6mJ“&M0[*)[[[kñÐ¥.^¼È®]»Ø¶m[¶laëÖ­,Z´ˆýû÷àîîN“&MhÚ´)Íš5£eË–4kÖ ;;»rÞ ‘â^yåZ´hÁÃ?l:Šˆˆ|û-ôë))–o£Gƒ&ÐÝ” \`óèG?q4GDDDJÙ˼Œ+®ŒDÓÄ«¬5,WÞ|&N„>°4‹‹3NDDĨ.]º0tèP233qqq1GD*¹ê¹º]äFŒÕþÍè fИÆLe*™h:ŽˆˆH¥”O>oñæÏh:Ž95jÀÒ¥–i!Ï=ÿø‡éDRNrrrزe ?üð?ýô?þø#»ví¢  €   
š6mJll,cÇŽ¥I“&4jÔH‹í¥ÜØÛÛÓ¤Iš4iB·nÿë>œžžÎ¶mÛØºu+[¶laÛ¶müío#=='''Z´hÁ]wÝÅÝwßMëÖ­ 2¸"RÝlܸ‘Ï?ÿœÏ?ÿ\ÓADDL;~†·t@~øaøâ Ëäq¹i‹YLiô£Ÿé("""RÊ6±‰¿òW–°´²Êsq±LÏëÓÇR0ýä“0ožeª^³f¦Ó‰ˆˆÑ¥Kú÷ïÏêÕ«éÒ¥‹é8"RÉ© Däjþýoøì3X±Â²p±óÅ—ñŒg4£éAnçvÓ‘DDD*Où”ì$ÓQÌóñ±…<ø ,Xññ¦IHMMeíÚµlذü‘M›6qþüyÜÝÝiÕª]»våî»ïæî»ïÆÛÛÛt\‘¹»»Ó¶m[Ú¶mk½¬  €;wZ ›¾ùæÞyçrssñõõµˆ´iÓ†?üá888Ü©ÊÆGëÖ­éÔ©“é(""ÕWAüßÿY¦a::²eе«éT•^¼Ë»t§;¾øšŽ#"""¥¨€3˜»¹›nt»ö ¤êð÷‡Å‹aà@2Z´€=`Ú4ðÕk>©^|||hݺ5Ÿ}ö™ BDä–© DäjF†V­,ݼ„y‘Å,f ù’/MÇ©t¦0…ÇxŒpÂMG©î¿F‚Aƒà®»àÎ;M'’[”™™É† HNN&99™_~ù5jDdd$qqqDEEѼysjTó‚k©Ülll #,,Œ§žz øßœuëÖ±qãF–.]ʘ1c¬SD¢¢¢ˆŽŽæÞ{ïÕä)6là‹/¾`ÕªU¦£ˆˆT_[¶ÀóÏÃÏ?Cÿþ0i¸ºšNU%|É—la ‹Yl:Šˆˆˆ”²øˆïùžø4í²ZjÕ ¾û>þFŒ€† aØ0Ë1#GGÓéDDDÊM—.]˜2e ¹¹¹ØÚj9·ˆÜ<ý¹’テի᫯ÀF;!l±å]ÞåîáS>åQ5IDD¤ÒXÍjþÍ¿™Ë\ÓQ*–W_… à‰'`ãFps3Hn@nn.›7o¶€¬]»–‹/Btt4#GŽäÁÄÃÃÃtT‘2gggGdd$‘‘‘ÖËŽ=ʺuëHNNæ£>bêÔ©¸ººÒºuk¢££‰ŽŽ¦E‹Øè=§ˆÜ„W^y…¶mÛòàƒšŽ""Rýœ?¯¼3fÀÝwæMд©éTUÊ fð ÒŒf¦£ˆˆˆH)Ê"‹ÑŒ¦7½iE+ÓqÄ$ˆ‹ƒ˜˜=ÛR\ýá‡ðúëðç?kŽˆˆT >ú(#FŒ`íÚµ´k×Ît©ÄÔ’UäJÆŽ…´t®«(¢èIO1ˆL2MÇ©4&3™y–´4¥b©QÃ2ü̈7F®CZZ ôêÕ‹ÚµkÓ²eK,X@HH .äСCìÙ³‡ùóç§b©ÖêÕ«G\\óçÏçÀìÙ³‡·ß~OOO¦M›FË–- ¢oß¾$&&’““c:²ˆTëׯgõêÕ¼öÚk¦£ˆˆT?_ wÜ œ9–æR*)UÛÙÎjV3„!¦£ˆˆˆH){ƒ78Ã^åUÓQ¤¢pr‚‘#açNèÔ z÷†Ö­aýzÓÉDDDÊ\Æ iÒ¤ Ÿ}ö™é("RÉ© D¤$Ÿk×Z:VËeÞäM2È`“LG©~â'¾æk^æeÓQ*&??KQȲe𗿘N#%8räsçÎ¥}ûöx{{Ó³gORSSyë­·HII±€ôêÕ ???ÓqE*¬âããY¶l©©©üôÓO<õÔS¬_¿žÎ;S¿~}ž}öYÉÎÎ6WD*°qãÆÅýjd""R~ÒÓaÐ ˆŽ†F`ËKcu..u3˜ÁíÜN:˜Ž""""¥è˜Á ^áêQÏt©hêׇùóá§Ÿ,E"QQ–éòû÷›N&""R¦ºtéŠ+LÇ‘JN!"—*(€qã KK×¹Œ>¼Ê«¼ÍÛüÆo¦ãˆˆˆTx“™Ì]ÜÅýhÁÞuêÆAÿþ–E5bÜÉ“'™1cmÚ´! 
€áÇãîîÎ_ÿúWRSSù׿þE||<¦£ŠTJ5kÖ¤U«V¼öÚklݺ•]»v1|øpvìØA—.]ðòòâ‰'ž`ùòåäææšŽ+"Ⱥuëøê«¯xýõ×MG©>’’,S@þñK#ƒ¤$0ªJJ%•¥,e(C©¡Ã˜"""UʆPŸú¼À ¦£HEÖ¢|ó ,_›6ADŒ¦“‰ˆˆ”‰N:‘’’Âo¿i¦ˆÜ<íI¹Tb"üú+Lœh:I…ÖŸþDÁ@šŽ"""R¡íb+X¡é ×cÒ$ˆŒ„îÝ!+Ëtšj)??ŸU«Vñä“OâççÇ„ hÔ¨Ÿ~ú)'Nœ !!îÝ»ãîîn:ªH•ʈ#ذadÚ´iœ:uŠÇ{ŒÀÀ@^~ùevïÞm:¦ˆTcÇŽ%::š{ï½×t‘ªïøqèÕ bc- ¤¶m³œ—2óïáŒ3=éi:Šˆˆˆ”¢¯ùšòOf1 ‡ÿgï¾ãkº?Ž¿nÄŠ±G#”P5c«Uj–RU«4MKŒ¦5*Ô¡F•Ú´U³ÚÚA‹ZEk·DäkEe *Väþþ¸m~Ò(!‘Onò~öqí=÷Üs^÷T¿ßœÜó92šÎ{Тœ< cÆÀgŸÁsÏÁœ9pÿ¾é2‘$åååEŽ9Ø´i“é±c"òoãÇCË–ðüó¦KR´t¤c6³ÙÎv–³ÜtŽˆˆHŠ5é¦0-ha:%åst„¯¾‚Ë—¡·&§K—.1~üx<<â#p`ÃL§HjÒ°!üò LŸË–Añâ¶D¢£M—‰ˆˆ<777<==Ù´i“é±S"òQ£ÀË ^|Ñt‰Ýy›·©D%|ñÅŠÕtŽˆˆHŠð=ßsšÓøâk:Å>eÈkÖÀµkй3Xõ3Fb„††Ò¥Kj×®Mtt4?üðßÿ=µk×6–"øøøðÒK/1iÒ¤8Ë;F³fÍÈš5+Y³f¥I“&;v,Î:‹‹ÅÂÉ“'©V­NNNÔ®]›   Ž?Ž——ÎÎÎÔ«WsçÎÅÛ÷±cÇhÚ´iì>š6múÐ}<¸/‹Å‚··÷u>Ìš5kèܹóC_ûöÛo©Y³&™2eÂÝÝ???nükpÖõë×ñóó£X±bdÊ”‰üùóóæ›o²ÿ~BBBâ4ñ×_ÅYöà:çÏŸ§U«VdÍš•¼yóÒ©S'"""âµ>|˜—^z‰,Y²-[6^~ùå4ûËÑL™2áããÃÉ“'™>}:kÖ¬¡xñâøûûs÷î]Óy"’„Ö­[ÇÞ½{ùè£L§ˆˆ¤.÷îÙnHP©ܹ{÷¸q1£é²4g<ãiCÊPÆtŠˆˆˆ$‘œ`6³ÃÝ8K’ž£#øøØfš÷ö†m³ý­_oºLDDä©4lØmÛ¶™Î;¥!"‡úu0Lw¥x80“™ìa_ð…é‘aÓhLcžã9Ó)ö+~øâ ÛÏiÿºP_ÆjµòÉ'ŸP²dI~úé'Ö®];KˆÄõöÛo³víÚØçAAA4jÔˆ-ZLHH:u¢U«V\¸p!v=ë߃•üýýY°`¡¡¡”/_žN:1|øp.\ÈÅ‹)]º4ýúõ‹³Ï   ^~ùeÚ¶mË™3g8sæ mÚ´¡I“&ÅÛ‡Õj}Ì;÷‰:fëÖ­T®\ù¡¯µjÕŠ®]»ÉÞ½{‰ŒŒ¤wïÞqÖéÚµ+Ù³ggÏž=\¿~õëׄ——îîî9r„*Uª`µZ)Q¢ÎÎÎÄÄÄP¢D ~ýõWÜÝÝc?Ÿ··7=zô 44”Eÿþqï|êÔ)Ú·oÏ€ cï޽ܼy“&Mš<ò³¦véÓ§ÇÇLJӧOóÁ0~üx*V¬È¾}ûL§‰H9r$-[¶¤jÕª¦SDDR½{¡B1Âöøùg¨XÑtUš´™Íàh:EDDD’~”¥,Ýèf:ER3WWÛ î£GmBš5ƒFC„( IDAT 7MIIêÕ«Ç‘#G7""vHBDƌ矇¦MM—Ø­JTâmÞ¦?ý¹Æ5Ó9"""Fà[ÙJú˜N±õêÁرðᇰy³é»ráÂ5jÄ|@¿~ý8~ü8-Z´0•b•.]šàààØçþþþøùùñî»ï’;wnræÌI§N2d'NŒ÷þž={Rºti\\\κ={ö¤I“&8;;S¨P!ÆŽËæýwïïïÏ!Chذ!NNN”)S†Ï?ÿü±Ç)­Èœ93þþþ;vŒ‚ R«V-FŒAtt´é4I„o¿ý–Ÿþ™¡C‡šNI¢¢à½÷ fMÛŽ·ÝM8]:ÓeiÖXÆò/Q™‡Xû³†5la S™Šƒ.M’äàá˗ömnìýÎ;fºLDD$AêÖ­‹ƒƒC¼ï”EDBg]"gÎÀªU0x0X,¦kìÚÆC Ãn:EDDĨiL£%hLcÓ)©C¿~ðê«ðÆbºÆ.ìÞ½›Ê•+söìY~øáüýýÉ”)“é¬íŸY*þHûöíã­×¬Y3¶lÙoyÅî$œ/_¾xË (@Ø¿¾x ä•W^‰·­Ö­[˜ î'í|PDD... 
Ú««+‘‘‘q–U¯^–-[²qãÆØE‹w,»víÊìÙ³cŸÏ˜1ƒ÷ß?Þ>êÔ©ç¹»»;¡¡¡q–mÛ¶ ÄYVªT©xûLëŠ/ÎÆ™9s&'N¤~ýúñþü‰ˆ}°Z­Œ5ŠÖ­[S¥JÓ9""öo×.(_-‚¹sm7(ZÔtUš¶}üÀ|ȇ¦SDDD$‰Üå.H:P‡:ƒHRªW´ý¼ÿí·PªŒwî˜.y$^xá¶oßn:ED섈ŒmûÂçÕWM—Ø=W\Ç8f2“_øÅtŽˆˆˆ׸ÆR–Ò›ÞºëUR±X`Þ<È—Ú´[·L¥h_~ù%õêÕ£zõê>|˜5j˜N² 'Nœ xñâ±Ï#""pwwÇb±ÄyäË—‡ LÊš5kì?;88èÆ8::Æ[†··7nnn8::ÆnóßV¬XAÕªUñõõ%GŽÔ©S‡O>ù„{÷îÅYï7Þ`ùòåDEEÌÙ³giúÙ]]]ã<Ϙ1ãCY®\¹ù¹ÄÆb±àããî]» ¡fÍšý3!")Ïš5k8pà}ô‘éûvë /¾%JÀѣЭ›n•Œf4^xñ"/šN‘$2‰I\àcc:EÒ*èÒ‚‚ Oð÷‡çŸ‡+L—‰ˆˆ}ú0iÒ¤ØY*äñfÍšÅÖ­[yÿý÷c—5nܘ~ø!Þº;wî¤bÅŠI²ß† ²zõêxË׬YCÆ ã,û¯‰éÌ™3'ׯ_·|÷îÝŒ1ww÷ØD6¸Äb±páÂÀ60¥}ûö¬[·ŽÍ›7Ç[·k׮̜9“µk×Ò¥K—Gv=Ê‹/¾H```œe‡¦L™2O½Í´ _¾|lÙ²gggš4iÂÍ›7M'‰H¬\¹’£G2tèPÓ)""öéömÛ¬ µkƒ»»mVÍ ’‚Œf4¥(E3š™N‘$2€¸àB?ú™Nùnn¶ï—öîµ'T¬h›ADÛŠˆH S»vmصk—é±3ºBHÒ®ÈHX°úõƒôéMפ*å(GOzòŽîÞ+""iÇF6B=éi:%õjÙÒvAO¯^ðóϦkRŒððp^ýuš6mÊäÉ“Mç¤x·oß&$$„e˖ѰaCfΜɖ-[pqq‰]ÇßߟádzråJ"""¸qãßÿ=:t`øðáIÒ1|øp˜;w.W®\!,,Œyóæo dÏž=Ü»wÍ›7S¨P¡Dw–,Y2v@ǃjÔ¨Á€ áîݻѣG‡nÃÛÛ›ãÇsçÎþøã&Mšo0 @óæÍ ¡]»v899%ôÅ3tèPFŽI`` 7oÞäÈ‘#tïÞÞ½{Ç®M¶l”÷o9sædݺu\¹r…^½z™Î‘Lj‰‰aÔ¨Q´oßžråʙα?{÷Úf™5Ë6+Ȇ ð÷ÏÐ’2œå,_ó5C‚ƒ¾®Iö²—¥,%€œxúߊ<3U«Â®]°lìÜ %JØf¹}Ût™ˆˆY³f¥\¹rìÞ½ÛtŠˆØý†UÒ®Ï>³ éÞÝtIªô1“‘Œ|ÄGIºÝjÕª±nݺ‡¾vøðaÜÜÜˆŽŽàǤjÕªdÊ” wwwæÍ›gý7n0`À<<£FÂÝÝŒ3R²dIf̘oÝï¾ûŽjÕª‘%K²dÉòÈŸmžÄˆ#X°`ÁÎ$‘–ýsL, ‹…ìÙ³S³fM–.]JÇŽ9xð qÿ»uwwgÕªU,[¶ŒâÅ‹“?~FŽÉôéÓiÕªÕC·ý¤Ë<<<ذa«V­¢X±b-Z”•+W²aÃJ”(§' €Ž;âì쌯¯/3gÎ|¢Î‡iРŒ·|ñâÅܾ}///²fÍJëÖ­iÖ¬Y¼þÀÀ@2gÎLݺuÉ–-Õ«WçöíÛ,Z¦@²eËöÐ;6ÿµ¼\¹r,Y²råÊEóæÍi×®]œ+/^¤dÉ’üìiU¡B…˜7o .ä§Ÿ~2#"ðõ×_süøq† f:EDľü3+H­ZP¸0;f7³‚¤„sËä4‘‰¢íio:EDDD’€+}éKMjò*¯>Õ6t-€$ ‹ÚµƒãÇaØ0€’%m3ˆX­¦ëž¹´vÞ!"bjÔ¨¡!"òÄMˆqïžíÎ`>>àìlº&UÊF6Æ3ž®ÿå…W’l·oß¾L:5ö‚´MŸ>wß}GGG~ÿýwÚ¶mË´iÓhÖ¬¿ýö¯¿þ:Å‹çÅ_ k×®¸»»H¸qã»wïfúôé4oÞ7’ú98À_@•*ðúë°y3¤Kg4©gÏžäÊ•‹ÀÀ@òçÏÏèÞ½;¹sç¦}{ÛÅ-{÷îåÍ7ßdúôé4jÔ«ÕÊæÍ›éܹ3›6m¢J•*Oµï‹/2gÎf̘««kR~¬TÃú”_âxxx°råÊ'ÞvB—m€Ã† ÛÒ®];Úµk÷ÔÓºukºuëÆ[o½gyžwî\Z´hAžû &L€%KŒ¦¼ùæ›ñ–U¯^S§NÅ>?tè-Z´ˆ·^Ë–-õËœíÛ·óÒK/‘!C†§Þ†¤]&LàÇ|¦û¨U«uêÔ¡B… Ït?ÿX¸p!Y²dI–}Ù«F‘>}z¶oßn:EDþåþýûŒ=šÎ;SªT)Ó9"")ß¾}P±¢mFðO?…íú÷þ&Ï-“Ëe.3‹Y d ™Èd:GDDD’À(Fq›ÛŒdd¢¶£kĨŠáÇaíZ8tÊ”± <¿qÃtY’K ç""ö¬hÑ¢,XÝ»w›N;¢!’öüðüü3ôíkº$MðÄ“>ôa0ƒ #,I¶éããÃêÕ« 
lK|úé§ôêÕ+vóçÏS²dI,Kì#GŽ„††Æ®³lÙ2ÂÂÂ(Q¢¥K—¦[·n¬]»«Õš$""’vìcG8B7º™NI{ºv…þýÁÛÛö‹zCÜÝÝã-suuåêÕ«±ÏÿøãØ/¬”7o^._¾üÔû>wîÅ‹ê÷KÚæèèÈçŸþ̶oµZ¹{÷nœ;‹‰y™2e¢`Á‚œ?ÞtŠˆüË’%K âÃ?4""’²Ý»þþ¶YAÜÜìzV™<·L.ã .øàc:EDDD’ÀÎð Ÿ0‚ä"W¢·§kĸ-àäI3ÆvS²çžƒ9s &ÆtY’I ç""öÎËË‹½{÷šÎ;¢!’ö̘5j€——é’4c8ÃÉLf>$i.fÈ•+mÚ´aöìÙ¬]»–|ùòQµjÕØu²eËÆ¥K—°Z­q÷ïß]§hÑ¢¬Y³†k×®±lÙ2ªW¯ÎèÑ£ñööN’NI;æ3O<ñB?_1n¼ü2´m gÎIppxü©UÞ¼y¹råJ¼åW®\!_¾|O½o«Õš ý‹ýhÔ¨‘éIÒ¥KçüHDÌ»ÿ>cÇŽ¥[·n”,YÒtŽˆHÊuø0T­ 'ÂèÑv?+ȃLž[&‡PB™Ã3˜Ìd6#"""IÀ?ŠQŒwy7I¶§k$EHŸÞv“Ù3glß=ùúÚÎAvì0]–$Rûy‡ˆHjP¹re8`:CD숮’´åÒ%øö[xàîòìe%+“™Ì|泇=I²Í¾}û2kÖ,îÝ»ÇôéÓéÝ»wœ×ëÕ«ÇÚµk´­Œ3R¾|y|||ذa_ýu’4ŠˆHÚp‹[,g9Ýén:%írp€¥K¡`AhÙ®_7]ôP+Vä»ï¾‹·üÛo¿¥bÅŠO½Ý‚ ’ˆ²´Í’ w~Ò}Ĥ¢;=蟻õÙÓ¾}||ˆŽŽ~EfÝ»w .àææf:ED°`ÁΞ=ËàÁƒM§ˆˆ¤L÷îÁ°aP¹2äÌ 'NÀÀ¶sÂ4äY[&‡±ŒÅÞâ-Ó)"""’¶²•ïøŽ)L!=é“l»º@RŒœ9á“Ol3æÍ uëÚf 6]öÌÙóy‡ˆHjP©R%.\¸ Y™D$ÁÒÖoÉE>û ²g‡6mL—¤9íiOcóïMâ/¨*[¶,Ï=÷ÇçäÉ“¼öÚkq^>|8#GŽdÑ¢EDDDpóæM¶nÝJ³fÍbשS§K—.åÂ… DGGsùòe¨S§N¢ûDD$íXÁ þâ/Þà Ó)i›³3¬] С¤À»ÞðÁ :”/¿ü’°°0ÂÂÂøòË/ùè£4hÐSo·N:lÚ´)U^´žVmݺÕtÂ3aµZínßµk×NÔŸ)ÕöíÛ¹uë–Î}DR{÷î1f̺wïNÑ¢EM爈¤<'N@µj`» kË(RÄt•ÏêÜòY %”¹Ìe(C5;ˆˆˆH*M4ïñ¯ð iœ¤ÛÖµ’â<÷¬[g; Ò¥m3ˆ¤Ð”%{=ïI-*W®ŒÅbá—_~1""vBB$í¸wæÍȘÑtMš4iœâŸñY’l¯oß¾Œ;–·ß~› 2ÄyÍÓÓ“õë×óõ×_S´hQrçÎÍèÑ£yÿý÷c×9r$ß|ó /¼ðY³f¥víÚÜ¿Ÿ¯¾ú*IúDD$m˜Ï|ZÒ’|hzdãŠᅦ~€øËè5j0þ|¦NJ‘"E(R¤S§NeÁ‚q¦»RmÛ¶åêÕ«¬\¹2 kE sçÎüüóÏœ;wÎtJ’š5kU«VÕEç")Èüùó¹xñ¢¾Pù7«æÌ*UÀÑ‚ž=ÁÐÌs)Á³:·|ÖF1Š\äÒì """©Ä fpŠSL`Â3Ù¾®©aCøå˜>¾ú Š· XO7)K,{=ïI-räÈA‘"E8pà€é±"iÇêÕðÇðöÛ¦KÒ,<ðÃ! 
!”ÐDo¯aÆdÊ”‰wß}÷¡¯¿ð ¬_¿ž?ÿü“¨¨(¶mÛFƒ b_ñÅY¹r%áááܺu‹Ó§O3nÜ8\\\Ý&""iÃYβƒt§»éùGåʰp!Lžl»h(nÿõù¶ìüùó´jÕŠ¬Y³’7o^:uêDDDD²Ó„€ãÇÓ´iSœqqq¡uëÖO4¨âرc4mÚ4¶§iÓ¦ñzs\BBBâü; ⯿þг,$$€N:ñÍ7ß$¸=¥ûñÇùæ›oð÷÷7""»{÷.ãÆãí·ßÆÝÝÝtŽˆHÊqî4h¾¾Ð»7ìÚ¥J™®zfLŸ[>Kç9Ï<æ1„!dD7ì±w‘D2ŠQô£x<“}èZI±m7¢ýýwðö†¡\9X¿ÞtY‚¤æó‘Ô¦råÊþëó?l™··7=zô 44”EÿþýsD“î˜&ô˜œ9s†—_~™¶mÛÌ™3gxõÕWyýõ×ÛúOÏ?ï?sæLì@–&Mš”$ÇÅÝÝ#GŽP¥J¬V+%J”ÀÙÙ™˜˜J”(Á¯¿þ{QvåʕٲeK‚ÚSºˆˆ:wîLË–-yùå—MçˆÈß>ÿüs._¾¬ÙADD´b¼ð‚í&O{÷¸q>½é*yJcC^òÒn¦SDDD$ f0Ž8&ú;ÿÿ¢kÄ.¸ºÚÎSŽ…²e¡Y3hÔŽ7]&""©D¥J•8tèé ±ަD’ʼn°clÞlº$Ís‰ÉL¦-méF7^äÅ'Þ†ÅbÁÉɉƳpáÂ$oIˆbXÌbÞäMõcuÊóñÇíÚÙ.*QÂtÑ3åææÆÂ… iÕª¥J•bÀ€¦“mðàÁÔ¨Q€—_~9öâüüüâÜ®S§Nܽ{—‰'òÉ'Ÿ°uëVæÏŸOŽ9¨P¡K–,¡X±b nxpp†““S§N¥ÄüYú¯Þ¤Ô³gOš4i€³³3cÇŽ¥~ýú ~b)$ì˜øûûÓ¯_?ºwïg{ÑÑÑìÙ³ç±þþþôïß?Îû½½½ùóÏ?1bK–,‰³þÓ—råÊÍáÇ)_¾<7n¤pá±ÏÁößשS§»½”îæÍ›´n݋ł LçˆÈßn߾͸qãxçwpss3#"b^X¼ó|óm¶ï)SÀÉÉt•$Â9Î1ŸùÌd&È`:GDDDéW~e.s™Ï|²‘-É·¯kÄîxxÀòå°m¼ÿ>T¨ÝºÁ¨Q;·é:±cåÊ•ãüùóDFFÆ~ç-"ò_4Cˆ¤ sçBÑ¢¶éåŸ6´¡)MéE/îqï‰ßoµZ¹yó&«W¯&[¶¤ÿ%“ˆˆHBì`¸@':™N‘‡±X`Á(V Z´€k×L=s-Z´`Ú´i 4ˆI“&™ÎI´ªU«>ty`` íÛ··¼Y³fqfq¨^½:-[¶dãÆDGGP´hÑD Ôpuu%22ò‰z“R:uâøàÓ)""æmØåËáC°u+Ìž­Á ©À(F‘Ÿüt¡‹éIïñ/ðÂ3ûnD׈ݪ_ßv.3w.|û-”*ãÇÃ;¦ËDDÄN•-[€'N.{ !’úݽ K—Â[oƒþȧŸð g8à f˜Ny*ËXFyÊ㉧éù/™3Ûî*û×_о=ü=( 5ëÕ«S¦LaàÀ¼ûî»Ü»÷äƒoS §ÿ¸ð+""www,KœG¾|ù ‰]oÅŠT­Z___räÈA:uøä“O|LÂÂÂðööÆÍÍ GGÇØý}â r‰ŽŽÆÅÅ%I÷›\V®\I•*UÈ!û÷ï§\¹r¦“DäŸ~ú)×®]cÀ€¦SDDÌÙ½*U²ýõjX¾þ5àWì×Ç|L! 
љΦSDDD$‘nq‹ ¤ ]ðÂËtŽHÊ–%‹m0È©Sàåe»iYÆpø°é2±#‹…2eÊpüøqÓ)"b4 DR¿yóàå—¡P!Ó%ò/ƒDnró˜Ny"›ÙLa´§½éIˆ*U`ñbøôS˜4ÉtMRï% IDAT²xå•WØ»w/QQQT¨PÏ>ûŒ˜˜ÓYI¢qãÆüðÃñ–ïܹ“Š+Æ>·X,\¸p° 0hß¾=ëÖ­cóæÍqÞ÷_³~ìÞ½›#Fàî£#@œÁOâQ3‹¤ =¦ =&/½ôß|óM¼å[·nMPOÆ Y½zu¼åkÖ¬¡aÆ ÚÆ“èÚµ+3gÎdíÚµtyÈ®_¿NΜ9“|¿ÏRxx8]ºt¡]»v´mÛ–;wâææf:KDpóæM&L˜€¯¯/yóæ5#"’ün߆Aƒ Nðð€_…Ö­MWIúßXÄ">â#q4#"""‰4 „ÎƘN±… Ù¾ŸÚ»nÝ‚Šm7³ýãÓe""b'Ê–-ËÑ£GMgˆˆЀIÝBB`ëVxë-Ó%ò™ÉÌLfò_±m¦sDDDl˨F5ŠRÔtŠ$TÛ¶0c K–˜®IžžžìÛ·ž={Ò»woªW¯ÎLg%š¿¿?ÇgåÊ•DDDpãÆ ¾ÿþ{:tèÀðáÃã¬ëííÍñãǹsçüñ“&MŠ7  `Á‚ìÙ³‡{÷î±yóf ý=¼F 0€îÞ½KPP=zôxªæÿÚGJ‘ÐcšÐcâïïÏäÉ“™?>aaaDFF²|ùrÆŒIØ—ÅÇ' €¹sçråʘ7oñþ'…æÍ›B»víprrŠ÷ú… (Y²d’ï÷Yˆ‰‰aΜ9”*UŠ­[·òÝwß1{öl²dÉb:MDþeæÌ™üõ×_ôë×ÏtŠˆHò;zªW‡Y³l÷ׯ·Íî(©Ê‡|HiJӑަSDDD$‘.p‰Ld(C)€~nybU«Â®]°lìØÅ‹Ûf¹}Ût™ˆˆ¤peË–åØ±c¦3DÄh@ˆ¤nóçCž<Ь™éùMhBKZÒƒÜáŽé‘ǺÍmÖ²–×yÝtŠ<©=à½÷ÀÛM×$‹Œ32nÜ8Ž;†‹‹ U«Våµ×^ãÔ©S¦ÓþÓ?³iX,–‡Î¬áîîΪU«X¶lÅ‹'þüŒ9’éÓ§ÓªU«ØõÉœ93uëÖ%[¶lT¯^Û·o³hÑ¢8Û  cÇŽ8;;ãëëËÌ™3X¼x1·o߯ËË‹¬Y³Òºukšý}^ñ`×ãzµÇ}þG-{ÔòGm31Ç4¡Ç¤X±blذ•+WR´hQŠ)¼yóøòË/ÔëááÁ† XµjÅŠ£hÑ¢¬\¹’ 6P¢D‰§:.:VdË–^½z=´çСC4jÔè‘Í)A`` •+WÆ××—N:qòäIš7on:KDâæÍ›Lž<™>}ú'OÓ9""Éçþ}=*U8|||LWÉ3°}¬e-ãO:ҙΑDêOò’—÷xÏtŠˆý²X ];8q† ƒ€(YÒ6ƒˆÕjºNDDR¨Ò¥KAxx¸éIá4G³¤^V+,] ;Cúô¦kä¦32”a*SÈ@Ó9"""´™ÍüÅ_´¥­éy“'Õ+ðê«°s'<ÿ¼é¢dQªT)6mÚĪU«øè£ðôô¤K—.|ðÁ<÷Üs¦óâ°&à‹V®\ùÈu4h@ƒ »­víÚÑ®]»xËóäÉÃâÅ‹ã-íµ×âüüü(Q¢;wîä÷ßgàÀäÉ“Çt¦ˆ<¡)S¦CŸ>}L§ˆˆ<;÷î¿?4jU«Âñãд©é*I·¸ÅÇ|Ì;¼ƒ¦sDDD$‘üñç.wÆ0Ó)"iG† з/œ9¯¾ ={ÚΫvî4]&""yxxh†y, ‘Ôé‹/ ^=ps3]"Oh*S¹Ìe&1ÉtŠˆˆHV¬¬e-­im:E’’‡‡ínµ{÷B·niþNKNNNtéÒ…;vðÛo¿Ñ¾}{¾úê+ªT©B‘"EèÓ§Û·oçþýû¦SER«W¯²dÉÚ´iCîܹiÛ¶-!!!péÒ%.\H­ZµLgŠÈSºvíÓ¦M£_¿~¸ººšÎy6Nž//˜8`Õ*È™Ót•$“ÉLæ×ÊPÓ)"""’H¿ñ3˜ÁhF“ý<'’ìræ„O>£G!o^¨SÇ6ƒHp°é21 xñâëÿDä14 DRŸ?ÿ´ÍÒ±£éy n¸1”¡Œa ÁèI9róœç^1"I­JøæÛÅJ½{›®I1J•*Å„ 8{ö,‡âÍ7ßdûöíÔ¯_Ÿ¼yóÒ½{w¾ýö[nÞ¼i:UÄn?žY³fѨQ#òæÍ‹··7·nÝbÊ”)\ºt‰;wòî»ïâââb:UDiòäÉ888hvI¬V˜3*W†ôéá×_mwµ•4#œp&1‰ /yM爈ˆH"½Ïû”¤$Þx›NIÛJ—¶]ÿ´e œ=k{Þ·/\¿nºLDD’Q¡B…ˆˆˆ **ÊtŠˆ¤`"©Ï·ßÂýûЪ•éyJýèG Jà‡Ÿé‘XëYO~òSžò¦SäY¨_–,Ù³áãMפ8*T`äÈ‘=z”³gÏ2lØ0N:EëÖ­Éž=;•+WfРArçÎÓ¹")Ö7 dРAT®\™"EŠÐ¯_?2eÊÄܹs¹rå 6lÀÇLJ¼yu!HjÁ´iÓèß¿?Ù²e3#"’´®\–-Á××6À~×.ÛLŒ’¦|ÌÇd$#ïñžéI¤ïùž l`3pÄÑtŽˆ4l¿üÓ§ÃW_Añâ¶D4›»ˆHšàææÀ…  
—ˆHJ¦!’ú¬\i;Ê‘Ãt‰<%G™Á ¾ûû/‘”`hNs,XL§È³Ò®|ú) S§š®I±ÜÝÝéÛ·/»víââÅ‹,\¸çŸž¯¾úŠF‘+W.š6mÊäÉ“ùå—_ˆ‰‰1,bÌÍ›7Ù´i  R¥JdÏž&Mš°}ûv5jÄæÍ›‰ŒŒä»ï¾£K—.š D$•š4i2dÀ×××tŠˆHÒڸʗ‡cÇ`ûv7Î6Cˆ¤)g9Ëlf3‚d%«éI„»Ü¥ýhG;^äEÓ9"ò ôéÁÇ~ÿ¼½aà@(W6l0]&""ÏX¡B… ‘GÓp~I]þú 6o†3L—H"Õ¡¯ó:½èE}ê“…,¦“DD$ 'œŸù™ 0"Ïšܺ~~1#ôèaº(EË—/;v¤cÇŽ±uëV¶mÛÆøñãéß¿?9sæ¤ZµjT­Z///ªV­Š«««ár‘gãÌ™3ìÛ·ýû÷³oß>:ÄÝ»w)S¦ 4`ذa¼øâ‹dÏžÝtªˆ$“ððpfΜɰaÃÈšUÉŠH*që d»;m§N0s&èãÒ¬Á ¦Ex‹·L§ˆˆˆH"}Â'œç<›Ød:EDþ‹««m0~÷î0t(4mj»iîÔ©àéiºNDDžœ9s’9sfΟ?o:EDR0 ‘Ôå»ïàÞ=Ûõb÷à9žcÁÓ9""’†md#8PŸú¦S$9ôí 7n€¯¯íŽKÞÞ¦‹ìF‰%(Q¢ï¼óV«•#Gް}ûvöíÛÇÂ… >|8‹…’%KÆñòò¢|ùò¤×„ÅÎ\½z5vàÇ?'}úô”/_///zõêEýúõÉŸ?¿é\1d„ dÊ”‰d*"©Åб#\¹_|:˜.ƒö±¯ùš¬ =:§±gW¸ÂhFóàŽ»éyœ’%aùrض Þ*T€nÝ`Ô(ÈÛtˆˆ$!‹ÅBÁ‚5 DDIB$uY¹êׇ\¹L—HÈG>†3œù7xƒR”2$""iÔ6P›Ú¸àb:E’ËСpû6¼û.dÉ¢‹œž‚Åb¡|ùò”/_>vÙ•+WØ·o_ìcÈ!\¿~L™2Q¾|yžþyÊ•+GÙ²e)_¾<9rä0ø Dl¬V+ÁÁÁ=z4öqøðaNŸ>ÕjÅÝÝjÕª1xð`¼¼¼¨X±"™2e2-")@XX³fÍbäÈ‘8;;›ÎIœû÷aüxð÷‡_„íÛ¡@ÓUbP 1ô¥/5©IژΑDÄ ²’U3¥‹Ø›úõáÐ!Xº´]75`øùA† ¦ëDD$‰,XK—.™Î‘LB$õ¸y6n„)SL—HêMo±ˆ>ôÑÔ´""bÄ}M béIn£FÙfŸëÜ¡];ÓEv/Ož<´hÑ‚-ZÃo¿ýƾ}û8tèÇŽcÕªUDFFP @Ê•+g H™2eȘ1£É!©Xdd$GމøqäÈŽ?Î_ý…Åb¡hÑ¢”+WŽöíÛS¹re¼¼¼È›7¯élI¡ÆŽ‹³³3ï¼óŽé‘ĹpÁv^´gmPÈ{ïÅbºJ [Ä"p€ƒÄ‚þ<ˆˆˆØ³Cb‹XÊR²ÅtŽˆ<)èÒ^}&N´ äŸ?ßö=—¾ÛIòäÉCXX˜é IÁ4 DRÀ@Û]œ[µ2]"IÈGf0ƒ:Ôa5«u§1Iv¿ð DИƦSÄ„qãàÆ ÛÅONNЬ™é¢TÅÁÁ2eÊP¦Lºuë»üêÕ«?~œƒrâÄ vïÞÍŒ3¸uëùóçÇÓÓ“bÅŠÅyxzzjfy¬;wîpñâEŽ?Ή'Ž}œ={«ÕŠ‹‹KìL5:tÀÓÓ“ *3gNÓù"b'._¾ÌìÙ³;v,NNN¦sDDžÞ7߀·7äÎmR¡‚é"Inpƒ! 
á]Þ¥<åÿI±¬Xy÷¨F5^çuÓ9"’Y²Øƒ¼õ íÛÃìÙÏ?oºNDD!W®\œ}šÓ§OsôèQV¯^Mxx8éÓ§ÇÝÝØG‘"E(\¸0… ÂÕÕÕÔG’dChh(çÎãüùóûç&44€téÒQ¸pa<<<(Y²$Íš5£téÒxzzâææføSˆˆ½;v,...¼ýöÛ¦SDDžÎíÛ0p L›f ?k–íâ"`#¸Ímüñ7""""‰ô_ð?±—½šõK$µ(T/__xÿ}ÛÀþŽm³‡èš*»”;wnvîÜi:CDR0 ‘ÔÁj… àÝwM—È32‘‰”¢£ÍXƚΑ4d;Û©G=}’–98ÀçŸC¦L¶A!_ ­[›®JsÒ§O§§'žžžñ^»ví§OŸŽsÑÿÞ½{Yºt)±ëeÉ’…"EŠP¨P!ÜÜÜ(T¨Pì`777Š)BæÌ™“ócÉS ãÂ… œ?>vÐÇ… 8wîçÎ#44”{÷î¶A… ¢D‰xzzÒªU«ØÁBÅŠ#C† †?ˆ¤F¡¡¡|þùçLš4Iÿ¿""öéøqxã øßÿàË/¡CÓE’‚Ä f@¹Èe:GDDD!Š(†0„ît§ ULçˆHRóò‚]»`åJøà(QúõƒAƒlßy‰ˆˆÝÈ•+aaa¦3D$Ó€I‚‹¡ysÓ%òŒä!ó1ýèGºPšÒ¦“DD$ ˆ&šŸø‰‰L4"¦Y,0}ºmpH»v°d‰.ŠJA²gÏN•*U¨R%þ—–·oßæÒ¥K\ºt‰ÐÐP‚ƒƒ æÒ¥Küú믜>}š?ÿü3výL™2áêêJÈŸ??®®®ñžÿóÏyòäÁÑQ§Õ‰uçÎ"""¸zõ*W¯^%44”K—.Åþó¯¿þJúôé åÂ… ܽ{7ö½®®®äÏŸŸ P¦L4h@(V¬ÅŠ£P¡B¤OŸÞà§‘´hÔ¨QäΛ·ÞzËtŠˆÈ“±ZmƒáýüàùÿcïÎãª.óþ¿»ì¨ì‹Š‰î¸+jšæ¸t—VN˘u—mSVwe3eiÚ6m·3ó«ii›Ê¹srÌrÉ,Å}ALTÜ”U@ÖßgÎw@pKå{¼Ÿ=Îã|9ïC¢ßs]×çúô´Ž»GG›JìÌÃ<ÌU\Å f˜EDDD.Ñ‹¼HÌcžÙQDäJ±X¬óZ&Xç¹^x>üæÏ·vƒ´hC<Gо}{Nœ8A]]ýÝ-"MÐÊi¾ù W/³“Èôð Ÿp?÷³šÕÚ©]DD®¸Ml¢„®æj³£ˆ=°X`ÁkQÈo~µµÖÛb×<<<Œâ€³9qâddd——Gvv6yyyäåå‘••Err2ùùùäååQSSc<ÏÙÙ™€€üýýñóókpìççgŸyíëëK›6mðððÀËËË¡»T”––RUUEqq1•••QXXHAAq\TTÔà¸þuAAAƒ‚€6mÚо}{BCCqvvfóæÍÄÆÆr÷ÝwIhh(DEEiç}±;|ðÁüñÄÝÝÝì8""./îº –-ƒ§Ÿ†Ù³ÁÙÙìTbg–°„ïøŽÕ¬ÆESŒ"""í0‡y×y!Äì8"r¥yz¬YpûíðüópçðÖ[ðæ›0x°ÙéDDä<Ú·oOuu5EEEøûû›GDìFk¥eXº&MRåz ç„æÏ b YÈT¦šIDDZ¸Õ¬&Œ0bˆ1;ŠØ ‹Å:80}ºõë[o55’\ºvíÚÑ®];z÷î}ÎÇÕÕÕ…"¶Â‘ÂÂB£À¡  €ÂÂB233IMMmPøP^^~Î×öõõÅÙÙœñóóÃÅÅÜÝÝñôô4ÛT‰³³3¾¾¾ô~kjjaÀŠ;l ©©©¡¨¨ˆêêjJJJ8}ú4eee”——SQQqÎïS¿¦þqTT”ñµ¿¿?DHHÞÞÞ ^gÕªUL›6¿üå/|öÙg 6ì‚Þ§ˆˆ^xáBBB˜>}ºÙQDD.ܪU0m¸¸À?@b¢Ù‰ÄURÉã<ÎT¦2’‘fÇ‘Kô8Aò ÙQD¤9…‡Ã;ïÀŒÖîC‡Â7«¯B‡f§‘³ðññ ¤¤D!"Ò$„ˆãË˃mÛà™gÌN"Í ?ý¹“;y„GÇ8üð3;’ˆˆ´`?ò#£ev ±7 üïÿ‚««µSÈÉ“ðÛßšJšÅb!((ˆ   ‹~nýîÅÅÅTTTP^^ΩS§¨¬¬¤¤¤„êêêEUUU”––µÉÊÊ¢¶¶¶ÁëÛ 5Îtúôé&w¨÷÷÷oÔNØÃãAǨ¨(£ Š­àÄÍÍ ///£Ã‰··7®®®øùù×þþþ\ôÏèlFÍŽ;¸ë®»¸úê«yúé§™={6ÎÚ±ZDìÌÑ£Gùë_ÿÊ[o½åÐÝŸD¤©®†ùóaÞ<¸áxï=¸ŒçqÒ²¼ÁdÁ V˜EDDD.ÑüÀ"±”¥¸£î–"­RB¬YK–À#@\<ôuíÕ›6‰ˆˆù¼¼¼ë""MQAˆ8¾U«ÀÉ FŽ4;‰4“?ð³˜yÌã5^3;Žˆˆ´P5Ô°‰M¼ÎëfG{õê«? 
™™ðòËf';æææft‹È¿þõ/þøÇ?2kÖ,~úé'>ùä"""ÌŽ&"b˜7o‘‘‘L›6Íì(""çwø0ÜvìØo¼3gšHìX9¼ÄK<ÅSt@»‹ˆˆ8²jx”G™ÀÆ3Þì8"b¶I“àÚkáí·á¹çàÓOáÙgáî»­k±DDÄ.xÿ»XO!"r6:sÇ·j ¾¾f'‘fÒ–¶¼À ,`)¤˜GDDZ¨ì¤”R3Øì(bÏf΄¿ý ^î¼ÓºÃ®ˆ\‹…™3g²mÛ6òóóéÑ£_~ù¥Ù±DD8räŸ|ò ³gÏÆÕÕÕì8""çöñÇг'TVZ BT "çñOà‡ó¸ÙQDDDäý…¿°—½Ú KDþÃÍÍú¹ðàA˜2x„µkÍN&""ÿf+9uê”ÉIDÄ^© DߪU0z´Ù)¤™ÝÍÝô£¿å·ÔQgvi6°_|‰#Îì(bï¦MƒE‹àË/áÆ¡¢ÂìD"-Z||<7ndòäÉÜrË-<ôÐCTVVšKDZ¹9sæÅm·Ýfv‘³+*‚_ÿ¦O‡ûîƒõë!&ÆìTbç6°ÏøŒ7yO<ÍŽ#"""— €æ0‡‡y˜®t5;ŽˆØ›ví`Áøùg ‚áíD6;™ˆH«çåå¨Cˆˆœ Bı=jýà¡‚VÇ 'þÌŸÙÀ>åS³ãˆˆH ´ bÎ8›EÁ¤IðÔ¿ú•u¡•ˆ\1^^^|ðÁ|þùç|ôÑGŒ9’cÇŽ™KDZ©ðÙgŸñÜsÏáââbv‘¦mÙ}ûÂO?Á²eðê«Ö]`EΡ†æaF2’)L1;Žˆˆˆ\¢ÙÌÆ 'žá³£ˆˆ=‹…¥KaåJ뚬nݬDŠ‹ÍN&"ÒjµiÓ'''„ˆÈY© DÛÊ•àé ƒ™DL@3˜Áÿ:w¶~¾¬©1;™ˆH«c±Xpqq¡ººÚì("b§T"ŽmÕ*ëb;ww³“ˆI^àê¨ãYž5;Šˆˆ´ ¹ärˆC aˆÙQÄÑÄÇÚ5P^nm¥½w¯Ù‰DZ¼˜˜6nÜÈÕW_͸qãxå•W¨««3;–ˆ´ûöíã‹/¾`îܹê""öçÄ ¸î:xüqøÝï¬,…†šJD9Ìf6ó8ñÄ›GDDD.Ñ£ =zÀwß™LD¤ÕqiBæ| IDATvv¦FEy"r*Ƕf Œiv 1Q¼Ì˼Å[$“lvi!¶² úÓßì(∢£aÝ: ‚ÁƒaÅ ³‰´xÞÞÞ|ùå—¼öÚkÌž=›n¸bµ¯‘fðÜsÏÃÍ7ßlv‘†6m‚þýaÇX½æÌ'M É…{˜‡ñçyÚì("""r‰±ˆ•¬d pÒ2!ù%àå—á矡{w?ÆŒÔT³“‰ˆ´*‘sÑ'=q\GŽ@V jv1Ùt¦3ŒaÜ˽ÔRkvi¶³Žt$€³£ˆ£ ².ººî:ë ø+¯˜H¤Å³X,Ìœ9“•+W²qãF Àž={ÌŽ%"-ØîÝ»Y¸p!sæÌÁI‹¬EÄ^ÔÕÁ‚ÖÎÚÝ»[ B† 3;•8˜å,çüƒ?óg¼ð2;Žˆˆˆ\‚Óœæ)žân!‘D³ãˆˆ£‹‰… áûï!7úô{ï…ü|³“‰ˆ´x*‘sÑL¥8® ¬­ ÌN"&³`áÏü™d’ù3;Žˆˆ´)¤Ð›ÞfÇGçî}¯¿¿ÿ½u@¼ªÊìT"-Þˆ#ؼy3¾¾¾ <˜ýë_fG‘jΜ9ÄÅÅ1eʳ£ˆˆXååY ÒÜúä믡];³S‰ƒ)§œx€›¹™‰L4;Žˆˆˆ\¢×xL2y‰—ÌŽ""-ÉèÑœ ï¿‹C×®ÖÍÑ*+ÍN&"Òb999Q[«Í²E¤i*ǵq#ôêžžf';Ðî<À<É“ä£DDäÒ¤B/z™CZŠ™3áÿ€Ï>³.Î*(0;‘H‹ÅÚµk¹é¦›¸á†xíµ×ÌŽ$"-Ì®]»øê«¯xþùçÕDDìÃêÕÖñò={`íZ˜3ô÷“üÏó<ùäóo˜EDDD.Q9ü?ð{~OQfÇ‘–ÆÉ ¦Mƒࡇ¬ŸC{ö´Î‰‰ˆÈeçââBuuµÙ1DÄNi6@׆ 0x°Ù)ÄŽÌcî¸3›ÙfGVJ)9¨!ryMž ëÖÁ¾}0`ìÝkv"‘ÏÝÝ÷Þ{7ÞxƒY³fq÷ÝwS¥.="r™<ûì³ôêՋ믿Þì("ÒÚÕÔXÝŒc/ON†AƒÌN%êg~æu^çE^$œp³ãˆˆˆÈ%z‚'ðÃÇxÌì("Ò’y{[?—ÚæÀ¦N…k®;ÍN&"Ò¢TUUáêêjv ±S*ÇT^))*‘|ñå^á]Þe›ÌŽ#""*…j©U‡¹üzõ²5ÀСðý÷f'ifΜÉW_}ÅçŸ΄ (**2;’ˆ8¸ääd¾þúkæÎ‹Åb1;Žˆ´f™™põÕðÊ+ðúëðÕWÖÏ"¿@-µÜ˽$À}Ügv¹DÙÈg|Æ›¼‰'žfÇ‘Ö 2>þØ:VV}úX;ˆ?nv2‘áôéÓ¸»»›CDì” BÄ1mß••ÚéL¹ÛÁäAj¨1;Žˆˆ8 ìÄ:ÐÁì(Ò……ÁO?Áر0n¼ôÔÕ™J¤Å»þúëY¿~={öì!11‘ôôt³#‰ˆ›3g}ûöeâĉfG‘ÖlÉèÝrsaãF˜9ÓìDâàÞà ¶³øgœÍŽ#"""— –Zf2“‘Œd SÌŽ#"­ÍÀ°n|ñ¬YW]eí RQqîçíÛ‡5KDGTYY©‚9+„ˆcÚ¶ ÚµƒNÌN"vèÏü™ìä}Þ7;Šˆˆ8 
=ì!Ž8,h·g¹BÚ´Ï?‡·Þ²€_w˜J¤ÅëÕ«7nÄÙÙ™Aƒ±mÛ6³#‰ˆÚ¶mK–,áùçŸWw1GuµõsÄõ×Ãøñ°u«µ¡È%8Ìaæ0‡gx†8âÌŽ#"""—è#>bÛx“7ÍŽ""­•Å7Ý»wÃ3ÏX»Zvíjí ÒÔFi0j”uc`mè$"ÒHuu5555xxx˜EDì” BÄ1¥¤h’KÎ*Ž8f2“ßñ;òÈ3;Žˆˆ8˜}ì#†³cHk0cüðƒµûÝ€°s§Ù‰DZ¼ððpÖ¬YCïÞ½9r$K–,1;’ˆ8˜gŸ}–„„~õ«_™EDZ££GaøpøÃà/±.¤ñö6;•8¸:ê˜Á :Ó™YÌ2;Žˆˆˆ\¢Jxš§¹—{é…ÖTˆˆÉ<=aÖ,Ø»ƃ;ï„!C¬.ë{óMÈɱn 6v,™“WDÄN>}@BDä¬T"ŽI!rÏò,žxò;~gvq0i¤© DšÏС°ctè`m¡ýá‡f'iñ|||X¼x17Ýt7Üpï¼óŽÙ‘DÄAlݺ•ï¾ûŽ^xAÝAD¤ùýóŸÐ§uQÌæÍpÏ=f'’â}Þg5«ù€pÅÕì8"""r‰æ3Ÿ *˜Ë\³£ˆˆüGx8¼ólÚ®®Ö¢›o¶vÉÍ…ù󡦯ÚóÐ!˜0þ½øYDDT""ç§‚q<55Ö–‚*‘sðÁ‡7xƒù lhpߎð%_š”LDDìÙiN“A† B¤y²e0s&Ü}7Ü{/TVšJ¤EsuuåÃ?ä¹çžãþûïçÅ_4;’ˆ8€§Ÿ~šÁƒ3vìX³£ˆHkRQaý¬0y2Lœ[¶@÷îf§’"‹,žäIåQúÑÏì8"""r–±Œu¬kpÛA²€Ìe.íioR2‘sè×~ú >ÿÜúù6>n¼ªªþó˜ª*káÈ´iPWg^V;R\\ €¯¯¯ÉIDÄ^© DOZ”—« DÎëfnæZ®å^jNsšxntãn¡€³#ŠˆˆÙÏ~j¨¡+]ÍŽ"­‹ ¼ü2,\h122ÌN%ÒâÍž=›·Þz‹Ù³g3sæLê4¹$"g±~ýzV¬XÁóÏ?oviMÒÒ`ð`øÛ߬Ÿ>þ<=ÍN%-È fОöÚA\DDÄÝÍÝ$’ÈT¦’u,ùQ¥3¹ûLN'"r L {öÀwBRR°v ù¿ÿƒçž3'£ˆˆ)(°®sô÷÷79‰ˆØ+„ˆãII±.˜‹5;‰8€ÿåI#ÇyœXbyŽç8Íiê¨c+[ÍŽ'""v&4œpâ*®2;Š´V7Þ›7Cq1ôî _mv"‘ï¾ûîãÓO?åí·ßfúôéTWW›IDìгÏ>ËСC=z´ÙQD¤µøøcëΩ..°};üú×f'’æ>à;¾ã>À‰ˆˆ8’ È"‹:êø'ÿ¤ ]˜Ît–°„7xW\ÍŽ("r~°k—õsoSjkaÞø0”¡,`é¤SC n¸±™Í&§{sˆCDfG‘Ö¬[7ض n¿n¸ÁÚ»¬ììß½jjš/ŸH tË-·ðÕW_±páB¦OŸN~§D¤ž¤¤$V­ZÅüùóÍŽ""-ŇÂÁƒMßW^3gÂôép×]°ntîܬñ¤å;Æ1çqþ‡ÿa8ÃÍŽ#""")™dê°vº­¢ŠÓœæS>ÅoŽsܸODÄ®-^ «W7îr¦ßþ–,ižL""vª  ‹Å‚ŸŸŸÙQDÄN© DÏþýcv ±sÕT³€t¡ I$Å ¶û7²Ñ¬x""b§2È Š(³cˆXwEZ°¾ú ¾ùú÷‡Ÿnü¸¤$èÑ~ÿûæÏ(ÒÂLš4‰%K–°hÑ"î¸ã…ˆˆaöìÙŒ5Š‘#GšEDZ‚¥Ká¿ÿ&Nl\ø½{7 ÿ»u±Ë‚àæfNNi±ê¨ã¿ùoBa.sÍŽ#"""¿@2ɺ€ÔPCeLg:ÃÆv˜”NDäTV£‚Ó.]œ:vèï5i½ ñññÁål]•D¤ÕSAˆ8žƒµ#šœÓQŽO<ÿÃÿPFU4ÞM –Z„ˆˆH#™dN¸Ù1Dþcòdëw@ h]fSZ ·Þj=~õUX¾ÜœŒ"-È5×\òeËX¼x1·Þz+ÕÕÕfG“ýðÃüøã<÷ÜsfG‘– °ÐÚõÃÉ €ûïÿÏ}l-÷ò‚-[`ÂórJ‹ö6o³ŠU|ÄG´¡ÙqDDDä¨ß!¤¾Zj©£ŽÍl&Þç}Ò‰ˆ\€äd8|jk­!œ«0¤¶ÖÚEdìXÈÈh¾Œ""v¤  ³cˆˆS¹˜8žƒáž{ÌN!v¬Š*òÉÇ §]AΔO>™dAD3¦{vŒc$’hv ‘†¢¢àÇaþ|xì1X»Þ{ž|rr¬áNNpË-š ¡¡f'qhÇç믿æºë®ãŽ;îàã?ÆÙÙÙìX"b’¹sç2vìX†nvi f΄‚ë9|m­µdà@kç¿/¾€‡‚×^W×ó¿–È/p˜Ã<õïÿ0Àì8"""ò mf3Õœ{#“6´¡+]›)‘ˆÈE8ÐZ²cìÜi½lÝ ééPW..àì §O[_] 'O˜1°iøù]з9uê•••ŽËÊÊ8ýï×¾Øã3ÕÕÕQXXxÞ,çz³qwwÇÓÓó¼ó÷÷Çb±œ÷5.åØÍÍ //¯‹Ê/"—ONNÁÁÁfÇ;¦‚q,yyPTW]ev±cWq)¤0 
ìf÷YÃ,XØÌf„ˆˆˆABÄn¹¸Àœ90t(L›f(?pÀ:(ÖÅd¥¥póÍÖâ-^¹$£GfÉ’%Lœ8‘ûî»wß}÷¬“)"Òr­X±‚5kÖ°víZ³£ˆHK°r%|òÉÎámzÈÚpåJ=ÚœlÒ*ÔRËÜA4ÑÌf¶ÙqDDDä*§œC:ëý®¸Ò–¶,c½éÝŒÉDD.LYY%%%”TWSIep0§† ¡¼¼œê¢"\öîÅïèQ¼"83“ÀœÜ*+¡¦ÒÒHîÖßõêEuu5………TWWSRRBEEååå ?.…‡‡mÚ´itÜ///ÜÜÜÎùz...øøø\T†’’’óv2¯¬¬äÔ©Sg½¿~!Jyy9•¡)¶â6mÚàáá...àì쌯¯¯QLâé鉻»;¾¾¾¸¸¸àïïoü,l?WÛÏÏßßooo|||.¨F¤5ÉÌÌ$"BkEäìT"ŽåàAëuçÎææ»AëYÏíÜÎb7Ù2× 7¶°…ÉL6!¡ˆˆØ›j8Îq„ˆ}3~ú  ‹¥áb²ª*X¿^}žzʼŒ"-ĨQ£øú믙4ižžž,X°ÀìH"ÒÌæÍ›Çøñã6l˜ÙQDÄÑ[ »œ¬ XêsrOOèßßœlÒj¼Â+lf3›Ø„ç^¨$"""ök';©¡¦Éû\q¥;Ýù–o !¤™“‰HKVSSCAAÅÅÅRZZj-ê()¡¸¸˜¢¢"ãëÒÒRŠŠŠŒÛlµÝV[[{Îïåë닳³³µ¸ÀÝß=èÄÕÔÐõôijÝÜoT€`+R8[!ÇÅ·tõ‹C~é±­ø¦¸¸¸QNqq1999 cûsd{̹Øþ¿úùùáããƒÞÞÞøùù·ÙŠGüüüðõõmð8|}}?#"Žîرcôî­b_9;„ˆc9p\]!*Êì$â¼ðb‹øàwü AaÈiN³ fÅ;“C5ÔF˜ÙQDÎí™g¬Ý@š0¯­…§Ÿ†áÃaÈæÏ&ÒÂŒ;–¿þõ¯üæ7¿!88˜ßÿþ÷fG‘fòÝwß‘””ĦM›ÌŽ""-Á#@~~ãb€êjÈɻ ›?›´ ÛÙÎæð"/Ò‹^fÇ‘KL2.¸PMÃãqæWüŠÏùO´«ºˆ4­¬¬Œ“'ORPPÀÉ“'Ïy\ÿR\\ÜäëÙí×ïìàãワ¯/QQQÆ×¶ÛšZÜf'‰ 1írþPZ©6mÚÅ/¦d8}ú4eeeF“³Õ/@***"##£ARaa¡QpÒ???hÛ¶m£KýÛÏqâìÛ·¯ÁíMu,ñððhð;о}{BCC $00ÐÐP‚‚‚ $$$??¿Ëò>DÎTWWGvv6áááfG;¦‚q,ÙÙjv q@7s31Ä0žñä“OU”SNiÄkrB1ÛIN€9; ˆœWn.Ü{¯µàã\‹Éjj¬»ß}7,ZÔ|ùDZ°û￟ŒŒ î¹çÂÂÂ3fŒÙ‘Dä Z²d ›7oVw¹tÅÅpçç?‡ëcî¹G"Éeõ?ü9ä°œåZ *""Òlb“1×mù÷âO<À&'‘Ë%''Ç(èÈÍÍ%//œœŽ?N^^Ç7ŽOŸ>Ý๶…ê¶Åê½{÷>oWWW“Þ©Èåeë4tÑÏ­ªª:oÇœüü|RSS¢«üüü¯áîîn‡Hpppƒã‚‚‚Ö&•ráNœ8AEE… BDäœT"Žåøq 1;…8¨ÞôfÛ˜Ä$RH¡šjœqf [T"""R€Ú¹CìTi)„…ÁÞ½Öb TW7ýØêjøúkí0,r½ð 9r„›nº‰¤¤$ºwïnv$¹êêê˜7o×_=ýû÷7;Žˆ8ºÇ³k×Ö6}¿««µ;HHÜr Üz«ŠAä²ZÆ2Þá>çs¢ˆ2;Žˆˆˆ\¢jØË^\pÁ 7þÁ?Ïx““‰È…*((àСCdee‘Ýà8++‹ôôtJKK<' €ÐÐP cèСÆqýÛ###UÜ!ò ¹ººÅ£  €¬¬, ŒßãúÇû÷ï';;›ÌÌL*++ç¹¹¹Ñ®];ÂÂÂˆŽŽ&44Ôø¶Ý…‹‹–÷ 8p€èèh““ˆˆ=Ó¿âXrr G³Sˆ %”$’¸‹»ø’/©¥–ÍlfÓÌŽ&""&+ o¼qE¥b§¢£aÏk‘ôš5°r%,^líâêjí R¡Y]<ü0  ½z™—[¤…°X,üõ¯e̘1\wÝulÙ²…víÚ™KD.³¯¿þš­[·òÞ{ï™EDÝ?À‡6î ââb-àŽˆ€É“ᦛ`èPkÁ·Èe”Gwr'Ó˜ÆT¦šGDDD.ƒ½ìå4§±`!˜`–³œxâÍŽ%"X7ÉÊÊâðáÃ=z”cÇŽqìØ1222ÈÊÊ"##ƒãÇSSSXÇ›CBBˆˆˆ ,,ŒØØXFMxx8‘‘‘F‡€€“ß™ˆœK@@Àÿžž>>ÀÆCë­Ö;õóóÃÉÉé’2‰´fdeeqèСF—´´4ãw¾þÎÿ¡¡¡ôêÕ‹n¸¡ÁÎÿêæ!Òú´mÛ–¶mÛ{ÖÇTUU‘——gt©ßIh÷îÝ|ÿý÷=zÔ8'qss#""‚èèèF—«®º ??¿æz{ríß¿Ÿ.]ºh~[DÎI«ıääXÀI«RUUE~~>ùùùäåå‘››K~~>ÅÅÅSXXh׿RTTDmý²Ï4õ>ÄÍóon²ÃÉÉé¢OŽë/¨´±¢\Û÷ô÷÷Ç××·ÑÅßß???|}}i×®ÁÁÁ´oßÞ¸hÐ@Dä—)¤ξø\Ä 
%%%”””4:Ç)..¦´´”òòrJJJ¨ªª¢ðª« <œ¨£G‰Í̤ßñãD:û÷óvx8óÚ´¡¢¢â-’«ÏÙÙ__ß³ÞïââbL@^Šó?]ÌùÕÙøùùáêꊯ¯¯11êãル«+þþþ¸ººâíím,¬ÿx|||ðõõ5®mçpÚ‰§ek×®‹-bèС̚5‹×_ÝìH"r™|õÕWüüóÏ|üñÇfGqh'Nœ0Æòl—'Nç³õÏo |]VVvÁß§©Ýë/v»g[ÀWZZJUUÕ½†««+>>> ÆìÉÈàú¬,ê€c!!ìïÛ—ÌpëÖ   Ú×ÖÒ>;[cyrÅü?°ŠUüÄOøröÏo"""ެ¬¬Ì˜.,,4.EEEQVVFEE………FGAA1®XXXHEEeeeçŸS>‹ómjs)c…MAW÷­Æ=ÍâyÅÜŸuÿ/z]Ûø®——øùùúûûsæÆùµí\×6þYÿr1çß"Ž ##ƒýû÷sèÐ!>l|>|˜¼¼<Àú»j,ºž8q"?ü°ñµvì‘_ÊÕÕ•°°0ÂÂÂHHHhò1¶ŽDgþ=µ{÷n¾ùæ²²²ŒÇM§NŒ¿£:uêD—.]Ôm¤¥¥cv ±s*Çqú4© ¤9~ü8ÙÙÙF <[Ë̼¼j;­ßØv_ýpÎÚ)®¹Ü÷ŸCÛy)ü§+týŽÏ¶sX[‰í¾ÒÒR***ŒÂl[ñÌÉ“'ãú3EEEMn4äææÖ @Ä6ZÿüÖv©¿Ñá™5Š4§êêjÒÓÓ9tè©©©ìÞ½›ÔÔTvîÜiü»Ntt4½{÷fòäÉÆbê®]»j,_DLc±XŒ1£ÄÄÄF÷WVV’™™Ù¨“ÑòåËÙ¿¿±9Š»»;;w&>>ž¸¸8âã㉎Ž&>>^ÿNÛ™}ûöñ«_ýÊì"bçT"ŽÃ¶S›Z™9„ªª*Ž=jœT44”ˆˆÂÃÃ‰ŠŠjÐ:°C‡Ú¡PDZ•*ªpÇÝìbçÊË˃’œ” IDATÉÍÍ%++Ëh×{üøqãß_[±‡íÚÖ¼>ooï„……5( ¨_ôÚÔíÒ§§'žžž@ó.,--mPÈc›=s÷kÛí999ìÙ³§AQЙE?îîîÆŸú…#¶‚Þàà`BCC $44Ô® uZº[o½•¤¤$îºë.zöì©yDÜÂ… IMMå‹/¾0;ŠH³«©©áرc &ˆ=Jff&ÙÙÙddd4èÜáááÑ H!>>¾Á¸m<Ïöµ© å.‘——^^^¿øsÆ_×ÕÕ5Zà˜——×`,ïØ±clÞ¼™¬¬,£¬;DÅ"gŽåÙ7JëVH!S™ÊhFó™GDDZ±ÊÊJrrrŒsJ[aqvvv£9ÍS§N5x®››[ƒóËvíÚѱcÇ&»SÔ/@ð÷÷o• ´ÝÝÝqw·Îm\é±ÑÒÒÒÝXšêÐb»mß¾}¬_¿Þ7?³€ÇÛÛ»Q‘Hhh¨1Bdd$ÁÁÁý™BÌUZZÊÞ½{Ù»w/{öì1®8@UU‹…:Э[7ÈwÜA·nÝèÚµ«6Ø‡åææfŒ5%77—´´´/~ôÑG=z”ºº:\]]¹êª«ˆ‹‹£k×®ÄÅÅ7¶Æs-³ÕÖÖràÀºtébv±s*Ça›tü÷B/1_uu5 55Õh™yðàA:DFF†±`@@;w&**Š„„®»î:ˆˆˆ 44”ÈÈHcŸ\<[ÁÌ…(++#==œœ222ÈÊÊ2vuüá‡xÿý÷‰f"##L,ÇÄÄÇUW]eì"."ÒRTS‹N[­ÒÒRÒÓÓÉÈÈ0&'sss]sssÉÎÎ6vL±ñ÷÷7çÒ¹sg Ðh1ýcM^µÞÞÞx{{ú‹ž_WWwÖî1õ;ÆÖ­[Iòú»n{xxHXXXƒ‚‘   "##‰ˆˆ ""‚`uZ¼"Þ|óM6mÚÄm·ÝFRR’1ù."Ž¥¦¦†çŸž_ÿú×ÄÆÆšG䊨©©áðáÃìÞ½›}ûö5*þ°-Îòòò"::šŽ;˨Q£Œ1<[Ú¶mÛšün—Åb¹¨±¼òòr£KFFFƒnË7näïÿ;¹¹¹Æk‡††6ËëÒ¥ ݺu£[·nmê¨ã.*>æc,¨£Œˆˆ\§OŸ&==ÇsôèÑÚÎ]Ž?Þà9!!!„„„N`` ±±±Ô¨¸ØODÚ-ÛxhDDÄE?·¨¨ˆãÇ7*޶u…ÉËËcÇŽMþù±yÚ Ómk :tè@ÇŽéСƒÆÝ…¬¬,’““Ù¾};ÉÉÉ$''säÈÀº8:&&†ØØX¦L™Ò`q³>+‰HkDPPP£î"eeeìÝ»—´´4vïÞÍÞ½{Y¼x1¯½ö•••X,:vìHŸ>}ŒKß¾}ñ<©\˜}ûöqêÔ)zõêev±sZñ&ŽÃ¶3È¿[³J󩬬dß¾}ìÞ½»ÁeÿþýTVVâäädìH×¹sgÆŒcGGGkk;âééiLŸÍÉ“'tv±]¯X±‚ŒŒ jkk“¸¸8ãKLLŒÛDÄa© ¤å*//7:•eddžžNff&™™™Æqaa¡ñxOOO"""ŒÅõ½zõ"((Ș´ 4&.µ¯\ ‹ÅB»víh×®Ý?§¶¶–ÜÜ\£HÉÖ¡ÆÖµæðáÃlܸ‘ãÇ“——g<ÏÝÝÈÈH£3œ­X$22’¨¨(ÂÃÃ/*‡X¹»»óùçŸÓ¯_?ž{î9^~ùe³#‰È/ðÅ_°oß>-Zdv‘KVUUÅþýûIÛÔÔTc7ÔŠŠ """Œ‚¡C‡6( 
P©}iÓ¦ ]ºt9瀥¥¥ |l—M›6qøðacüÖVäcÇ‹§[·nøúú6ã;’+éOü‰Å,f%+iãvÙóUUU‘žžÎ‘#G\>Ì‘#GÈÊÊ2ºÞz{{ÌÂÃÃéÑ£‡:>ÞXÚ¡Cœ›ã-:œ˜˜^}õU|ðA&OžÌ€ÌŽ$"¡¦¦†ùóçsûí·Ÿsó{ÔÔ8ÞöíÛ)// 44”øøx†Îý÷ßO\\½zõÂÇÇÇäär9y{{Ó³gÏ&Ï «««IOO7Æñ:Äúõëyë­·8õï ˜BCCŒã 4è‚;˜ˆýØÆ6žäIæ2—QŒ2;Žˆˆ8Û˜jýsÛ×¶bbÛxjtt4ñññLš4ÉG £S§NK•ËÎÍͨ¨(¢¢¢Îù¸‚‚c¼³þœÀ·ß~ËþýûŽßõ×9œ9î©«ö/++‹µkײyóf£¤°°gggºvíJŸ>}xâ‰'Œ…ÈþþþfGiQ\\\Œ Kþë¿þ˸½   AaÞ¢E‹xùå—©©©Áßßß(0`‰‰‰ê$ò %''»»»ÙQDÄÎiÅ›8[‡„\Vûöí#))‰M›6±uëV~þùgªªªðññ¡OŸ> 8x€îݻӵkW\´rnnnÄÇÇÏM7ÝdÜ~úôiöîÝË®]»Ø¶m[·nå믿¦¤¤777zôèABB$11ñœ»Šˆ˜¥šjœÑbg{VZZÊž={HMM%--ƒ—¢¢"\]]騱#;w¦k×®Œ?žÎ;EDDíÛk—Ti=ÜÝÝ #,,Œ„„„&SVVftÊ9|ø°ñ;µfÍþú׿RRRXÏ;uêDçÎéܹ3]ºt1:Åif̘Á¢E‹¸ãŽ;HNNV÷ òé§Ÿ²ÿ~/^lv‘óÚ»w/IIIlÞ¼™mÛ¶5ÇëÝ»7ýû÷ç¾ûî£{÷îtëÖMÿ ...Æ"·I“&·×ÖÖräÈRSSÙ¾};Û·oçý÷ßgîܹ€uGݾ}ûÒ¯_?†Jÿþý5.lÇNr’¹‘á ç÷üÞì8""b‡rrrHII!%%…´´4£ƒÜÉ“'ðòò¢k×®tíÚ•ë®»Ž'žx‚èèh:vì¨BQ±kßäý¹¹¹9r„C‡ö“’’øàƒŒévíÚѵkWºuëF·nÝèÕ«½zõR÷DÙÖ°¬Y³†¤¤$<ˆ‹‹ =zô oß¾L™2…¾}ûÒ³gOm\*"b¢€€FŨQÿÙ˜âÔ©S¤¤¤E"«V­bÁ‚TWWsÕUW1lØ0†ΰaôvìmß¾½Qב¦¨ D‡­Cˆ>ÐýbµµµìÚµ‹5kÖ°víZÖ¬YCNNžžž$$$0|øp}ôQúõëGLL NNNfGáîîn ŽÝvÛm€õÏ[ý]*·nÝÊ'Ÿ|Byy9¡¡¡$&&’˜˜Èˆ#ˆ×Ÿ71] 5êb'Š‹‹Â={ö°k×.öîÝËÑ£G©««ÃÃØ˜:wį̂Q£˜1c†±H=**J] D.‚§§§1ÙÙ”ÜÜÜÅW`ëÖ­üýïçĉ€uÀ×¶³ž­H$..Žðððæ|+¦²X,¼ûî»ôèуùóç3þ|³#‰È¨©©áÅ_dúôéÄÄĘG¤ššRRRŒ1¼¤¤$rssñòò"!!#FðØc‘ q<¹hNNNMŠäää4è6³`Ážzê)<<<0`Ç'11‘!C†àíímâ;›Zj¹Û©¡†Ïø 'ôwˆHkfë g+þرc)))?~€ˆˆbccéÛ·/·Þz«Qirr‘+#((ˆ   &;ú¦§§³oß>ÒÒÒØ³gûöícÅŠdffB¯^½èÝ»·1ƒ‹‹æ‘.§ššöîÝ˺uëHJJâ§Ÿ~"==OOOúôéÃ7ÞÈСCILLTçVj̘1¬\¹²Ùž©ßïJ¿žˆ½óòòbÈ! 
2ĸíÔ©S$''³nÝ:¾ÿþ{|ðAÊËË ¦ÿþ 6Œ¡C‡2pà@\]]MLoêêêØ±cS¦L1;Šˆ8}RÇQYi½vs37‡ƒÉÎÎféÒ¥|ûí·üøãàççǰaÃxä‘GHLL¤_¿~¸éç*—™““±±±ÄÆÆrûí·PYYÉ–-[X»v-k×®åé§Ÿ¦¸¸˜¶mÛ2räHÆÏ„  19½ˆ´FN8QMµÙ1ZcÇŽ…ƒÛ¶mc×®]¤§§ЦMbcc‰‹‹ãÞ{ï%66–øøx:uꤢ‘fb›4|˜ï¾ûÎì("9r„o¿ý–o¿ý–µk×c&Æ ã‰'ž`ذa$$$hbT®˜&L˜À„ ŒÛ8`ìÎûå—_2þ|\\\èÓ§×^{-'N¤ÿþ*J2É\æò=ßó#?ˆvpimŽ9ÂúõëÙ°a7nd×®]TTTàêêJ\\½zõâÚk¯5µ·k×ÎìÈ"v#**Ѝ¨(®¹æš·Ÿ8qÂ(¦JIIaÙ²e¼ñÆTUUѦMºwïΠAƒù„·Þz‹ýû÷SVVFLL &LàÖ[o¥{÷îWü}šÉËË‹aÆ1lØ0fÍšEee%[·neíÚµ$%%ñòË/SXXˆ¯¯/#FŒ`ܸqŒ7Žèèh³£›îÀ¨Cˆˆ\KÝ%ük»mÛ6.[ ‘³Z²®»ÎÚ)¤M³ÓØ­ÚÚZ¶mÛÆ7ß|ÃÒ¥KÙ¾};Œ5бcÇ’˜˜HÏž=[ì",q,555ìܹ“5kÖ°bÅ V¯^MEE L˜0‰'’Ðä ȕԜÿ¾Y,¾üòË+6óî»ï2cÆŒ+òÚ"-ÍÜI.¹,e©ÙQZ¬œœ£ðÃvÅb¡K—.$$$ЫW/£Ó@ÇŽµ¨HÄåçç"?ÿü3Û¶mcçÎTTTàååEïÞ½‘~ýúѵk×ó»^WWÇØ±cÉÊÊbûöí¸»»›©Ù,\¸©S§6ˤΕ>_·›/\¸ðм~}:_7OUUݺuãšk®áwÞ1;Ž´RÕÕÕ$%%ñí·ß²téRvïÞ¯¯/cÆŒaôèÑ$&&ßìã""ç’ÍÚµkùñÇùî»ï8räŒ7Ž &píµ×jçÞf²’•üŠ_ñÿøÜ˽fÇq(úYÌžÖCTVV²mÛ66lØÀúõëY¿~=ÙÙÙ¸ººÒ·o_ DŸ>}ŒñUm(rùTVV’ššJJJ ÉÉÉlܸ‘äädªªª 3vBùä¾üòK²³³‰ŠŠ2vs5jmTD# ¬¬Œ~ø¥K—²téR222 cêÔ©üæ7¿¡OŸ>Í’Ãþ¾\´ÀLäÂ=Àìa«Ymv”cÿþýüôÓOüôÓOüøãF«õÎ;7X Þ·o_üüüLN+"WRUU»víjP¶sçN*++ñññaÈ!Œ1‚#Fп‡Ô=xð ={öä¹çžãÉ'Ÿ4;N³QAÈ/£óuó¼ûî»üö·¿%--N:™GZ‘ÚÚZÖ¬YçŸ~ÊW_}Eaa!]»ve„ Œ?žÄÄD-‡’ššjt¨^·n£Fâ¶ÛncòäÉxk,ÿŠ8ÊQúÑk¹–OùÔì8""Ç BêêêØ¹s'Ë—/gùòå¬_¿žŠŠ ÎC† ¡_¿~š1AYY[·n5 ´6lØ@~~> 6Œk¯½–k¯½–=z˜Õååå¬X±‚E‹±dÉ Œ.ƒãÆcÈ!=|¹mÿ÷ßÏŒ38tè ©Z.„\y®®®TTT8ÌfÅçú¹½ûôÒKìØ±Ã˜ÏÎÌÌäúë¯gõêÕøøø4zÎ÷ßß &88˜Ÿþ™   Û½{7ñññvñÿÌžTUU±nÝ:£›TJJ mÛ¶eÒ¤IÜpà Œ;¶ÕœÿÞyç9r„Õ«[Æ„HKfëM[oyµ8ëuuµ¹9ìHzz:/½ôñññ$$$°téRî»ï>RRR8zô(o½õ&Lh5'Aâø<==™8q"o¿ý6éééìØ±ƒ3f°dÉúöíK÷îÝyå•WÈÈÈ0;ªˆ´@žxRN¹Ù1ZZZï¾û.·ÝváááÄÄÄðÈ#““ÃŒ3X¹r%'OžäÀ|ñÅ<ñÄ\}õÕ*i\]]éÓ§wß}7ùË_زe %%%lÙ²…W^y…¶mÛò§?ý‰¡C‡Àرcyá…X·n•••fÇ¿(;wæ‰'ž`þüùäää˜GDšPUUÅË/¿Ì=÷Ü£bi6?ÿü3³fÍ¢cÇŽ\}õÕ$''3{öl8ÀÞ½{yýõ×=z´ŠAÄáÄÇÇóä“Oòã?’——ǧŸ~Ч§'÷ÜsÁÁÁÜvÛm|ûí·Tk\ÿ²© ‚ÉL&œpÞã=³ãˆˆÈe”ŸŸÏçŸÎôéÓ §wïÞ¼öÚk„„„ðöÛo“––Fnn.‹/fÖ¬Y$&&jXÄ$žžž >œ§žzŠýë_äåå±wï^Þzë-yå•WèÙ³'áááÜu×]|ùå—œ8qÂìØWTmm-«V­búôé1yòd:ÄìÙ³9räÛ·o祗^bĈ*¹@ ++«Ñí»víb„ øøøàããøqãØµkWƒÇX,, {öìaРAxzz’˜˜ÈHMMeàÀx{{sõÕW“žžÞä÷?~¼ñ=Æßà{Ø^ßb±ðãÿŸ½û‹âêß~¯ÔeAQP–HlÑ V,Q¤X¢HŒ P A#Ö'SÔØcl±l1ò¨  J°ƒÑ',Á¨ˆé@zø¾øc_Ö]`–Ýe<ŸëâJö0sÎ=EæìÌœ™Ë€   ©rEòÊóz=5Ëž>>pttÄ?ü )Û²e 
|}}å©A%%%r¯‡wïÞ ‘CGG#FŒÀ† pçÎ$%%᫯¾BBB&Mš„¶mÛböìÙøã?PUUÅw\µúóÏ?1lØ0¾c0 ÓD°!LÓQ= ¤²’ßàòåË;v,lll°e˼ÿþû¸víâââðí·ßâÝwßå;"èD¯^½°bÅ ÄÇÇ#22ÇǦM› ‹1~üxüïÿã;"Ã0Í¢¸òòr\¸psæÌ••ºvíŠÅ‹#''sçÎÅÕ«W‘——‡ððp|óÍ7ppp€‰‰ ß±†Ñºººèׯ|}}qäȤ¥¥ááÇغu+ÌÍÍáïï{{{˜˜˜ÀÕÕl2M—.] ¬\¹’ï( Ãȱwï^¤¥¥áË/¿ä; ÓÌUTT ((vvvx÷ÝwqìØ1xzzâÁƒ¸uë-Z„N:ñ“aT¦U«V˜2e Nœ8ôôtlÙ²?Ƹqã`ee…Õ«W7™þœ&ó’„ßð„`73 Ã4u999øé§Ÿ0lØ0´mÛžžžHNNÆÜ¹sñ÷ßãÙ³g8|ø0fÍš…Î;ó—a˜:téÒ³gÏÆ‘#G‘‘›7oÂ××ñññpww‡¹¹9FŒ€€äææòWe=z„åË—ÃÚÚø÷ß±~ýz¤¦¦âÊ•+X¸p!¬­­ùŽÙ$ݼyb±Xª,!!£GÆøñã‘””„””¸»»c„ xúô©dºê›ÌW®\‰ŸþéééèÕ«ÜÝݱbÅ üðC$&&JnàwvvFBB€Wç<:uê„ÜÜ\Œ10uêT¤¥¥ÁÔÔEEE å•GÞòÕe^^^ðõõEzz:¢¢¢PRR‚%K–pžŸˆ$?ûöí«uz˜0afΜ‰ÜÜ\\¿~¹¹¹˜7o^Ù몯fÛeee°³³Ã¶mÛ(·¾^ ±XŒèèh\½zURÎ¥êùììì@D°µµ…¡¡!ªªª`kk‹;wîÈ웯/wõ2nذAªÜ××K–,AZZΜ9#•«¾ý®f= Ù·ÊÛÛ¡¡¡’ÏçÎÃÈ‘#9Ï?~üx,[¶ yyy*É󦱱±Á¢E‹põêU¤¦¦bíÚµ¸ÿ>F±XŒ¯¾úJ%ƒ4Mjj*’’’0tèP¾£0 ÓT¢¢¢(**J™*†»?ÿ$ˆž=ã; /***èØ±cdggGhøðátâÄ *//ç;Ã4ª²²2 ¡¡C‡²³³£cÇŽQEE…ÊÚhÌã V[ýj«›aš›u´ŽlÉ–ïïùóçtìØ1š>}:µjÕŠPïÞ½iÕªUtíÚ5zùò%߆iFhÏž=4vìXÒ××'---zÿý÷iÛ¶m”’’Âw¼:ýòË/¤¥¥EÑÑÑ|GiÁÁÁ¤äi&ÎÔÝ_Ÿýúë¯4eÊ …¤««KãÆ£#GŽPQQßñäÒ¥K4iÒ$ÒÒÒ¢öíÛÓòåËéßÿå;V“Tó¼jEE¥¦¦Ò¡C‡È‚~ùå©ig̘A7n”©cÿþýäçç'SïåË—%ŸSSSeÊž«j}Ý¿ŸÄb1ݸqCfEÚèÝ»7ݹsGòùÌ™34räHÎ9ä•_ºtIîï¸îwÕõ4dßV4oµ¢¢"‰D’ÏB¡ÊÊÊ8ÕMDT\\L , SSSrqq¡]»vÑÓ§O9ÏÏÈ÷àÁZ¶lµk׎´µµÉÍÍMjhê>L:::M¶Ÿ Oc^c˜Æ¦ ÷›²7„0MGõB^¾ä7BCCѵkWL›6 ÖÖÖ¸qã._¾Œ‰'²Wi2o]]]¸ººâÏ?ÿĵk×СCL:]»v•‘Ï0 £(D(F1ß14REE~ÿýw¸ºº¢M›6˜6mÒÒÒ°råJÉk¾¿ýö[ 8ÚÕ}6†aè@Ÿ IDATÔ©¼½½qêÔ)dffâèÑ£h×®V¬X±XŒ~ýúaÇŽùD=ôéÓ ,à; Ã05ìÙ³ÙÙÙXºt)ßQ˜fèåË—øá‡СC|ýõ×?~<âãã {{{¾#2 /Äb1Ö®]‹ÇcÆ 8wîºtéwww¤¥¥ñ¯É8óXŽåØ„Mƒ1|Ça†aàöíÛøä“Oжm[Ìš5 D„Ÿþ8rä\]]ѲeK¾c2 £ÆÆÆpssCPP222°oß>TVVÂÓÓmÛ¶…——îܹÃwLN"""0`À¼ÿþûxôè8€””¬]»]»vå;^“%  ­­ KKK,]ºðôô”š.""S¦L‘™ìر—)ïÓ§äÿ-,,dÊÚ·o¬¬,™6&Nœ(S—««+"""$ŸgÏžŸþYjšÀÛÛ»Áy¹6l˜Ôg±XŒôôô×§•\“8zô(.]º„={öHÊT±¾’““1~üx8pýû÷—ù½"mÌœ9’Ï;wîÄ¢E‹8娼LÕ¹¸ìwÕ²o7Õñ¶. 
°uëV$%%ÁÓÓׯ_Ç{g3f   @%ßDݺuÃúõëñèÑ#9r1bˆ°°0¾ã)í?þ€D"ßQ†i"Ø€¦é06~õß7¨#”€±cÇÂÕÕÄÇqüøñZ;Ç ó¦8p ~ûí7ÄÆÆ¢ÿþpuuŸq㘘Èw4†aš s˜# Y¨BßQ4Fzz:¾þúktèЮ®®(,,ÄîÝ»ñìÙ3\ºt óçÏg¯ùf¦ÑaòäÉ8|ø0233qþüy¼ûî»X¾|9Ú·oéÓ§ãæÍ›|Ç”ؼy3þøãœ>}šï8 Ã(--ÅÆáëë KKK¾ã0ÍLDDzõê…o¿ýóæÍÃãÇñã?ÂÆÆ†ïh £1D"æÍ›‡¸¸89r×®]C×®]±iÓ&¼|¥ˆ‡xˆ©˜Šé˜Ž…XÈw†aFAçϟǰaÃЧOܸq7nDZZN:…3fÀÐÐïˆ £”Ñ£Gó¡I122‚‡‡Μ9ƒ´´4lذ×®]CïÞ½1bÄ¥n’W§ÈÈH 2£GF›6mpóæMDEEÁÓÓ“=ÈTˆHò“——‡uëÖÁÇÇ)))RÓåää@,KTÿXXXÈL ¼ÚߪµhÑBnÙë7¶gggKn°¯©]»vÈÎΖ|~ûí·a``€èèhÀŸþ‰–-[JÝ”¯h^®LLL¤>ëéé)tƒ>WYYYðòò‚••´µµ%ù•‹ àøñãRýU¬/ggg¡¸Xþƒicúôé8vìJJJ””„äädŒ£Ü ä–sÝïª5dßn¨ S§N’Ï;vDjjªÂõcêÔ©8tèRRRPZZ ???•d|“éèè`òäÉÇõë×Ѻuk|ðÁ°··Çõë×ùŽ× D„³gÏ*ýïa˜7 Â4ff¯þ+§“×ÜTUUaÍš5èÙ³'ž>}Šÿýï ”ê\ªÚë}@ 8ÇŽS[»|SÕ—µ¦Ð¶ºÛãs]¾ýöÛ8|ø0.]º„G¡GX·nªªØMÝ Ãpg T Ùhþ}ú$''ãã?†X,ÆÞ½{áííÄÄD\¼x³gφYu¿¬‘¼ÞG100@·nÝàíí¸¸¸FÍ¢jê8vòµ¾šË¯›ËrÔ¦¹,Ÿ®®.qàÀ¤§§c÷îÝxøð! {{{\¸p¡C‡bÒ¤Iøúë¯Õr1ŠaÅøûû#77K–,á; ÓŒäææâ£>ÂèѣѥKÜ¿ß}÷LMMÕÖ&ëkN½š¢©-Ÿ––¦L™‚˜˜,^¼+V¬À»ï¾‹7nðM#å!ã1ÝÐ {±—ï8 Ã0Œ"##1xð`8;;ÃÐÐ/^DLL æÎÛhçXå]~ý§!õi*ví—ª¾&ûú>* aee…ñãÇãàÁƒ(//¯uÞÄÄDøøø@,COO­ZµÂ AƒðÕW_InZ^ÝhyèÐ! 8­[·†P(D¯^½°|ùrÄÄĨtyêÒ¦M|þù爉‰ADD„B!5ê&Ö‚‚øúúbèС …¸~ý:N:;;;¾£5[­ZµÂ¬Y³0kÖ,¬X±BêwfffÈÍÍ•@RýSRR¢’öÍÌÌä¾m#==]æø5{ölìß¿°ÿ~©·ƒ4V^uòðð€H$ÂÕ«WQZZ*É®Œââb¸¹¹aãÆèÑ£‡ÔïT±¾V®\‰Ó§OÃËËKî÷lEÚhÓ¦ † ‚   øûûÃÏÏOmÇEö»Æ õöggg¥ï‰D"lݺ'OžT6SÀpúôiDFFBWWC† Áܹs›Ü›Xþù礥¥aìØ±|Ga¦ aB˜¦ÃÔ€œ¾“¨U~~>\\\°víZ¬[··nÝÂСCÕÞnõ–êNþË—/ƒ9sæ`Þ¼yرc‡Ú3ðÏ›²»mu·§ 7¸ >·o߯š5kðÝwßaìØ±M®SÏ0 ,ðê‰#ÏðŒç$ü)((ÀüùóѵkW\½z»wïÆ£GðÝwßA,ó–«f?¥ªª 8r䬬¬0pà@¹ñ[S°õżi ññÇãÖ­[¸|ù2Œáää„‘#GâîÝ»|ÇÃwß}‡èèhœ:uŠï( óF{ñâ~øáÌ;íÛ·ç;ÓLܾ}}úôÁõë×qöìYœ8q¢QÞÂú{Ls! ±bÅ }ºÔï#¯¢Ð‰U«VA,C[[”ÈòÙgŸaÀ€˜5k–L&U¬¯iÓ¦¡_¿~øïÿ‹I“&Éü½T´™3gb×®] …§§'§ ¡È~טüýýqñâE,Z´HR¶hÑ"ìÚµ«Ö¿Ï!!!Rý>@ ÷ /ÚÚÚìMqj2hÐ üñÇøå—_püøqôìÙÿý7ß±8;}ú4Úµk'Õwa†©)!**Š¢¢¢”©‚aÓªQ@ß)Ô&;;›úôéCVVVô÷ß7zûµýI'›FNÓx”üSؤÚVw{|®Ë×ݼy“,--©oß¾”­Ð¼y|@ÁÁÁj«? 
ÿÍdUË£<ÎÓy¾£ðâôéÓdiiImÛ¶¥€€*//ç;’”ÚŽ1aaaÔ³gÏFN£:ê:vò±¾4© Œæ²µiîËWíÊ•+4hÐ ÒÑÑ¡o¾ù†^¾|Ékž &PŸ>}¨ªªŠ×êÜhû—ºûë“'O¦É“'«­þšX½ñlÚ´‰D"eddð…i&þúë/266&ÊÊÊjôöYÿX3êÕÍaù*++iÕªU$è»ï¾ã;ŽÆ˜GóHHBú›ÿzÃ0LsÆõ»XC¾FFFRÇŽÉÒÒ’~ýõ׆ÄS©×û õ}nHš†]ûmêZ¶€€‹Å”ŸŸ/){òä õíÛ— åÎ.U§¹¹¹Üs÷ïßטõzìØ1jß¾=Y[[Óµk×½ýÀÀ@Ò××§1cÆPNNN£·ÿ&©kŸsww§~øAò999™zöìIǧììl*,,¤°°0²´´¤“'OÖ[/—²¸¸8²´´¤½{÷RFFeffÒ¾}ûÈÒÒ’âããeæŸ={69::Ò'Ÿ|"ó;EòÊÃu™ÖÊÊŠ"##©¼¼œÎŸ?OVVVµNïääDÞÞÞ”œœLeeeOžžž ^·?ýôõìÙ“JJJäN§êõD¶¶¶ôìÙ3I™¢m”••‘©©)-_¾¼ÞökËQ_9‘bû]C×?—¼/^¼ ääd:zô(5ŠÞy犋‹“™',,ŒúöíK'Ož¤¼¼<*--¥øøxZ³f YZZRtt´TýöööI………T\\LQQQ4jÔ(Zµj§ŒLÃegg“³³3éëëÓáÇùŽÃIÿþýÉËË‹ï*ט×ߦ±iÂý¦l@Ó´têD´nÊ«@´zõj²¶¶&]]]zûí·iÇŽ2Óþþûï4`À200 0`:uJé eee4tèP‹Å”””¤t} Q[G¸  €ôôôdÊïÝ»GcÆŒ!CCC244$'''ºwïžLèñãÇôÁ¡¡!™››ÓŒ3äÞ¤_W—.]’ÔW3ëóçÏÉÐÐRSS%eZZZ’é.]º¤ðrsY6"¢ÐÐP^ÓêãíÙ³‡;FÑ6ÚF-¨“êÏjú¹ †auS×€°°0Ò××§±cÇò2h˜‹ºŽï\¯{r½¨ÌùM®Y¸.ã›píW™m‘œœ,ÕŸŒ§çÏŸK•%''ךáõœŠÞ#Pß²ÉãããC_ýµäóÂ… eú)u144¤ÒÒRÎÓó%33“\\\H(ÒéÓ§­ÝãÇS‹-è‹/¾ ÊÊÊFk÷M$"##%¿¯>¶ÄÅÅ‘››“H$";;; ©·^®eDDÑÑÑäììL"‘ˆD"9;;KÝà^ÓŸþIj½Ã%/×uS[^E¦=vìÙØØ®®.ÙÚÚRhhh­Ógdd‡‡™››“®®.õèÑCr.¼!ëVOOOî÷øšÓ4t}KÕ•šš*U```ƒÚxùò%‰ÅbJKK«7ÃëË]Ûú¨mç²ß)»o×—WOOÚ·oOcÇŽ¥Ôy¬¸ÿ>MŸ>ÌÍÍIOOÄb1y{{SBB‚Ôt×®]#ooo²µµ%]]] …Ô»woÚ±cG³~˜&©¨¨ Å‹S‹-8ý{âSFFF“ÈÙl@ÓœiÂý¦l@Ó´ôïO´x±Ê«@>>>´|ùrЧ¢¢"º|ù2½õÖ[$™îÚµkdjjJ‡¦ÌÌLÊÈÈ ÀÀ@211¡›7o*•á›o¾!###Љ‰Qvq¬¶Npxx8uéÒEª,>>ž,,,Èßߟ233);;›é­·Þ¢'OžÈÔëèèHgÏž¥çÏŸÓãÇÉÕÕ•fÍš¥pQQQÔ²eKª¨¨ÌwðàA²´´¤­[·JÕ×µkWN£^_nE—- €Š‹‹)==<==iæÌ™RÓ$$$P‡hÿþý”‘‘AYYYHƒ RºíÑ£GÓ_ýE%%%tæÌ…N V·gee%É–‘‘A{÷î•aÿðáCêÒ¥ …‡‡Sqq1Ý¿Ÿ†*÷Kvµ¨¨(‹Åµžh,ÑÑÑdhhH+W®ä<& U…]üðC©ùmCEú ]_\sN˜0V®\IÏž=£ÒÒRúçŸhÈ!uöjz½/”MG¥=zȧ¾íÇe½pͬȺSd»ÖVoCö§êyëës*»ÔÝåÓÝ»wÉÚÚšúöíKÅÅżåpqq¡AƒñÖ¾º±! 
ÃúëcýúõdhhH™™™|GašÊÊJêÛ·/õïߟ^¼xÁ[Ö?VMÿ˜õñ¤ëД>ÞÊ•+IWW—bccyi_œ¢S¤EZ´6¨¥~M?WÁ0 £nêróæMÒÕÕ%///©ë˜š¦®ã;—þÉëuÔv=Pý7.Y¸,ã›ríWÙmMvvvRóVUU‘­­-ݹs‡sŽše\îP¤Îšþþûo©ï>ݺuSèÁ›Ó¦M£ Pnn.çyøRQQAü1éééÑß«ÿÍq·oß&}}}š?¾ÚÛbF󓇇ß1F%>ÿüs …t÷î]¾£Ôj÷îÝ$‰¨¨¨ˆï(*Ç„0Í™&ÜoÊ„0MË„ DS§ª¼ZôÍ7ßÈ”Ÿ>}š  ùcÆ Ú¸Qö†Ùýû÷“ŸŸŸL½¯¿V066–Ú·o¯pUUUÔ¶m[©W¢Ž5ŠÂÃÃiðàÁ’²øøxjÛ¶-§‘Ô¯ŸLRdÙ^—››K¦¦¦Reîîî´mÛ6™iþùg¥ÚêûÉëäµ'/ÛæÍ›ÉÝÝ]òyÚ´itèÐ!©ibcck½ø|ÿþ}‹Åõ>…»±|ÿý÷d``ÀùiLšp€VvƒÃ(f OéS•ÖY^^N–––2ƒ>³³³©U«V’W’{xxH½ò™ˆèäÉ“4fÌÉgCCC*((PY¶ .@ Pè©Y|©>ÆTUUÑóçÏéöíÛ´råJ255¥ .ÈL[Û1’뱯ºžË—/K>W?I§fÙ“'OÈÜÜ\©6jËÙÐþHÍzëZ_\sʼ9&))‰óMhµõ…Ž=Zë…º¶—õÂ5³¡Oꟸ4ù\Ã0LcPõ€—/_’­­-9;;kü“ôùî.¯R³Žº®*ÛãšEž7ñÚ¯ª¶EïÞ½¥œ9s†FŽÉ9Çëe\îP¤ÎšŠŠŠH$I> …B…ÞàX\\L , SSSrqq¡]»vÑÓ§O9Ïߨ*++ÉÑÑ‘:wî,y“›º899Ñ Aƒ4þïÃ0ê¼zëKŸ>}èŸþá;èDEE 0€\\\øŽR«aÆÑT5Ü« Ø€¦9Ó„ûMÙ€¦iY¼˜¨_?•W €eÊsssÉÄÄDò¹cÇŽôøñc™é=zDÖÖÖ nßßߟŒŒŒx}Z-‘ìkðP»ví(<<\fÚ¶mÛRJJŠLù³gϨ[·n2õ¾þdÒÒR ªsæÌ™´fÍ""zúô)õû¿}bäÈ‘’§~lÞ¼¹ÁO‰QdÙ”©/==]©¶(¼Ïpm/))‰,,,¤¦KMMåTRR½õÖ[u£[õ Au½\l@Ãh–Ïè3NÃU^ïÚµkÉËËKªlÆ ROSéÔ©ÅÅÅIM“››K–––’Ïýû÷'ooo•]”ùäzøð¡Üik;Fr=öU×SXX(ù\YY)·ŒkŸ¦¶6É©H¤¾õÅ5çèÑ£iÈ!töìÙ:/4)º,ÙÙÙµ^(Ttû½¾^¸f–GÑå·]k«·!ûלêÚNªê‡j‚_~ù…tttä~§k,#FŒ qãÆñÖ¾:±! 
Ãúëê·fÍjÙ²¥Ì Õ ÓPcÆŒ¡‰'òƒõÿOCÏ×5$[mõ²>žz’®®.åååñ¥Q=¥§dEVäDNô’Ô{£Ÿ¦ž«`†i ª¾^tìØ1ÒÖÖ–{ÜÕ4Š~w¯íü]}וí¿qÍÂeºæ~íW•ÛbÛ¶mRT3f :uŠSye\îP¤Îšž?.3 ¤´´”SÝ5åççÓÑ£GÉÃÃÌÌÌhúô锟Ÿ¯p=!))‰´µµé×_U[Õ…¼xñ¢ÚÚ`¦i@:::´`Á¾£0ŒJ…‡‡¹çQù–ššJ-Z´TÛ\°!Ls¦ ÷›¶Ã4%: j©Z,Ë”™˜˜ //Oò9##æææ2ÓµmÛÏž=kpÛÑÑÑèׯ \‡ªÐ«b "äååaݺuðññAJJŠÔt999‹ÅR?2Ó¯ÖeMzzzxõ·Iñ:]\\pñâEÀ‘#G0}útÀÔ©S Ø1c´¸æÈÊÊ‚——¬¬¬ ­­-™îuÙÙÙ°°°)—W¦èzUvŸ©-[»ví-5™™§:QTT„ââb¥²©’H$‚îÞ½Ëw†a4\tA,bU^¯BBB$[+++±{÷n|þùç’ižJII bcc±oß>tîÜYî´µ#¹ûªIþ¿E‹rË^_ÿж!¢}yê[_\s?~ýû÷ÇܹsajjŠaÆaûöíxùò%§µµÓºuëZç©mûq]/ÊfVd9Ù® ÙŸ¸ö9•]æÆî‡òaúôéÐÒÒÂÕ«WyËàçç‡Ó§O#..Ž· ó&),,Ä–-[°páB˜ššò‡i&îÞ½‹áÇóë+Ú?–‡õñ¤iRoĈ(//Ç¿ÿþËw”FSˆBŒÁÃA‚6´ÕÚž&ž«`†iª"##Ñ¿X[[ó¥Á¸öOªÕw=P™þ›¢YêÓܯýªr[LŸ>ÇŽCII ’’’œœÜàkî·{êÁƒèÔ©“äsÇŽ‘ššªp=ÆÆÆ˜:u*:„”””––ÂÏÏO%UÍÆÆvvvøë¯¿ÔÖÆíÛ·¡««‹¡C‡ª­ †aš"Byy9¶nÝÊw†Q©áÇCWWwîÜá;ŠŒ£G¢eË–pvvæ; Ã0MÂ4-¶¶@~>Àñbœ"ª/ÖÕ¥mÛ¶ÈÌÌ”)ÏÌÌ”{‚‡«/^@__¿Áó«K«V­0kÖ,Ìš5 +V¬ú™™rss¥Ô¼Ý\ëtttÄÍ›7ñâÅ =zÓ¦M¸¹¹!$$yyy¸~ý:ÕšÃÃÃ"‘W¯^Eii©dyõÉ0”““Óà¶UÅÌÌLêâ]µôôt© ­[·æ||åÊ•8}ú4¼¼¼pãÆ •eU–P(TË:d¦yé‚.È@òWÿÄ 033äI“ ………ú÷ï/™¦eË–HKK“ùû_YY)™ÆÆÆ'Nœ@~~>‚‚‚0hÐ ¬]»^^^ g*))Aee¥ÔJo®Ç>¾ÛhŒ>×œÆÆÆØ²e ‘””„¹sç"((HÒãÒŽ¼¾PCTs]/Êf®­muï;òpísªb;5f?”ÚÚÚ …(,,ä-Ã|±XŒÝ»wó–aÞ$[·nEee%æÏŸÏw¦yñâ„B!ß1TæMï³>žæªþw¦é9U¥åpƒ²‘³8‹Vh¥ö65ñ\Ã0LSUTT‘HÄw ¥píŸT«ïz 2ýE³Ô§¹_ûUå¶hÓ¦ † ‚   øûûÃÏÏO©Á8ꀉ'J>;;;#<<\©:E"¶nÝŠ“'O*Om QTT¤¶ú‹ŠŠ  ¡£££¶6FY£Gæ;Ã0M˜ŽŽï× kŒI“&AOOï( Ã4Al@Ó´ØÚ¾ú¯šÞRŸ>}ú ,,L¦ü÷ßGŸ>}\ï[o½…û÷ïkì³æÌ™ƒÐÐPäææJÊœœœpùòe™i¯\¹ÒàVÍie IDATuÁµN¼÷Þ{صkÚ¶m+Œcjj lÙ²ýúõC«V »hÆ5Gdd$V­Z±X míWOk“wâÎÑÑQîI£ê·œ4¤mUqpp@HHˆLù‰'ààà ù>^å¹EDˆ‰‰mõß ¢©'QæMÕ]ñPåuÏŸ?þþþxùò%vìØyóæIýþý÷ßGhh(§ºôôôЫW/øøøàìÙ³V8OëÖ­Ñ¡C¥/45\}ÕFmÇÆèpÍ)ðôéS¯.N™2§OŸÆ… ¤æ«mYjë ?^áÌ\× ×ÌòÔ¶±ïÈõϩìvjì~(nÞ¼‰¼¼<ôîÝ›· ZZZ˜3g8€‚‚Þr¨CõCªªªxN¢<@ ±ßÏî °}ûv,^¼X扤 £ŒN:áÞ½{|ÇP™7¥ÌúxÒšB/::4ò|žªU¡ ðÀMÜÄ)œBth´¶5í\Ã0LSõÎ;ïàÖ­[Mz #×þIµú®*ÓQ4K}šûµ_Uo‹™3gb×®] …§§§J2ªš¿¿?.^¼ˆE‹IÊ-Z„]»v!??_î­É÷xÜ¿#GŽ„‘‘‘ÒÛº1¤¦¦ÂÝÝíÛ·‡––V­™›Â²Tk*9ßdñññ(((Pëñ´!>|ˆ¨¨(L™2…ï(jCDìßÃ(©ú¶ÜK¤„¨¨(ŠŠŠR¦ †QLE‘®.Q` J«­ëŸBÍßýõ×_dbbB‡¦ÌÌLÊÌ̤Ç“©©)ݸq£Áíß½{—Ðùóç\‡*ÔµÜÝÝé‡~|NNN¦ž={ÒñãÇ);;› 
),,Œ,--éäÉ“œê}½\‘:×®]K"‘ˆ>,U~øðaÒÑÑ¡µk×rZfer899‘··7%''SYYÅÇÇ“§§§L}‰‰‰Ô¡CÚ¿?effRNNS¯^½”Z ùþú´{÷î:ë "[[[zöì™ÂYUéÌ™3€îÝ»ÇiúÆ<¾ikkËìǪ ¶º¦9ª¤J’~¦ŸÕRÿ¨Q£èË/¿¤¶mÛRYY™Ôïbbb¨]»vtðàAÊÎΦ¢¢"Šˆˆ 1cÆH¦:t(Ò“'OèåË—”žžNË—/'—åÙ¼y3‰D"Îù¤Èq¯®i¹ûj«‡K™"mXYYQdd$•——ÓùóçÉÊÊŠˆë(ºÍ €œœœ(&&†JKKéÙ³gôå—_’««+§ey½/”MGŽ¡!C†p^ÇÕ¸®®™å©m9Ù®ò4tâÚçTv;©»Ê·ÒÒR4h 2„ï(”——G"‘ˆ¶oßÎw•:}ú4 çÏŸ«½-u÷×=<§*¶“ºú¡|+))!WWWjÕªýûï¿|Ç!"¢Ï>ûŒºtéBUUU|GQ™?ÿü“PzzºÚÛRwÝÇLJÔVM¬¿®yyyÔªU+Z·nßQ˜f(''‡Z·nMŸþ9oXÿX±þq}õ²>žæõñ"""H Ðo¿ýÆwµû†¾!-Ò¢ãtœ· šv®‚a¦1¨z@Ñ?þHZZZ*¹n¬.uï¹öO¹ØÐþ›"×`¹.cs¼ö«®mñòåK‹Å”––Æ)‡"ß ¸.ãëëCOOÚ·oOcÇŽ¥Piii­óÞ¿Ÿ¦OŸNæææ¤§§Gb±˜¼½½)!!Ajºk×®‘··7ÙÚÚ’®®. …BêÝ»7íØ±C#ÏÙ…„„––íܹSím’––:uJím1 £ì÷6eæoìðõuš˜©š¶¶v“º™\OO¯Î¿é5iòz¯©©ä|S…††’–––ZÜÛ¥¥¥Ô¦MZ³f ßQÔÊÁÁ :eš­Æºß´¸¸˜Èí«³!LÓãéIÔH7f4¦””233#777zùò%ßq¦Yxùò%¹ººR›6mèÑ£GœçkÌ㛡¡!íß¿_mõ³ÌFqsi.õ§þj©»¤¤„ôõõééÓ§j©¿!rrrhðàÁdbb"w9èKLL Y[[óƒiæhÀ€djjJW®\á;ŽDõ["ÿ÷¿ÿñEeþùçÀéiêÊRwÝÏÏìííÕVM¬¿®Ë—/§Ö­[sÈ0 qôèQìß0èAll,™™™Ñ”)SøŽ¢v»h H@{i/¯94ñ\Ã0Œº©c@Ñ«@èèèÐÞ½üþmge“‡‡ß1˜H[[›æÎÛhmz{{“H$Ò¨sªš.44”LzzzdmmM ,97U=ÈéñãÇôÁ¡¡!™››ÓŒ3ä¾ù¥æà³–-[ÒĉéÑ£Gœo8Wd~Eò×üùä“OªCÙ¬u 6KHH<êõéîÝ»GcÆŒ!CCC244$'''ºwïžLŽ;wîÐèÑ£ÉÀÀ€ŒŒŒÈÙÙ™Î;ÇyÜ»w\\\$í¸¸¸È´SWÞääd©ºãããéùóçReÉÉÉr×áë¹–.]ªÐúášûÁƒ4`À …dooOñññCýû÷'‘HD#FŒàt޼uY×ÀAye\¶k~~>-X°€lllHOO,,,hæÌ™tãÆ ""Îë¼¾zj[¦×÷úòªz=3ÿßåË—ÉÀÀ€>ýôS¾£È $mmmJMMå;ŠZÙÛÛÓüùêy8+Ãð­±î7ÍÏÏ'tþüy™ßµÃ45ÿ TUñD¥¬­­ñÛo¿áܹs˜2e ^¼xÁw$†iÒJJJðá‡âÂ…  AÇŽùŽ$—ŽŽ^¾|Éw †aj„A¸ÛxÕ‹óóó±qãFxzzÂÒÒR¥u+ÃÔÔ/^„››\]]1uêTûì3lݺ•ï8 ÓlDEEaذaèܹ38Àwµ Bæa6`¼àÅ[M=WÁ0 ÓTíÞ½_|ñ|||0}útäääð‰a8¸yó&6n܈… ò‡••…©S§Â××_~ù%vìØÑhmïÞ½ÎÎÎ5jöîÝÛhí6e&LÀÌ™3‘››‹ëׯ#77óæÍ“š†ˆ^^^ðõõEzz:¢¢¢PRR‚%K–HM›˜˜|øá‡HJJBbb"ÜÜÜ0uêTNy_‘üôêÔ "ìÛ·O¡:”ÍZA^™¯¯/–,Y‚´´4œ9sFòû„„Œ=ãÇGRRRRRàîîŽ &àéÓ§’éâââ0eÊ|ñÅÈÊÊÂõë×Q\\ gggNë !!A²‰‰‰HLLĤI“àì쌄„NyÅb1¢££agg"‚­­- QUU[[[ܹsb±Xîz|=Û† 8¯Er¯\¹?ÿü3ÒÓÓÑ«W/¸»»cÅŠ8xð RSSÑ­[7,^¼XnƺòÖüá‚ëv9s&Zµj…k×®¡  gΜABB  Ð:¯¯žÚ–©æþÁ%¯ª×3óJ@@1vìXìܹ“ï820a´oßžï(jUZZ }}}¾c0L“V}Ÿ©ŽŽŽì/•iÂÞÂð"*Š ú÷_¾“¨Å•+WÈÔÔ”zõêE>ä;Ã4Iÿþû/õìÙ“Z·nMýõ—Âó7æñ­M›6j}µ/{Z)Ã(.‰’ºBª{ê200 WWW*((PY½ªvîÜ9zë­·H__Ÿ-ZDééé|Gbš‰S§NѨQ£ÈÐÐZ¶lI}ûö¥ƒò‹i†ÊÊÊhïÞ½Ô¡C200 
Õ«WSyy9ß±äò÷÷'¡PH¹¹¹|GQ‰ôôt@þù§ÚÛRw}õêÕÔ¹sgµÕ_믫Þ_|Afffôüùs¾£0o€M›6Q‹-hÆŒÝÏgMWUUEþþþ¤¯¯OÎÎÎTTTÄw$µ:M§I—ti1-æ5GS9WÁ0 £êzCHµsçÎQ»víÈÔÔ”víÚ¥±ç&¦&¤££C ,à;ʯ¬¬ŒvîÜI&&&diiI.\à%GUU}ûí·$hÊ”)ìš‘‚rssÉÔÔT¦øàœ;w™íZß:çZ²y«iÊznªÊË˱cÇtëÖ QQQ8{ö,V®\ @Àw4[¶lA—.]àààÀwµKKKköoAau«ë !l@Óôèè½{7oòDmÄb1"##±qãFìØ±Ý»wGhh(ß±F£8qÝ»wÇ®]»ðý÷ß#22ÖÖÖ|Ǫ—®®.;iÆ0h ªt@HS¢««‹yóæ!%%;wîÄÍ›7Ñ»woØÙÙÁßßyyy|Gd†TTTàÌ™3øðÃÑ¡ClÞ¼žžžHNN†¿¿¿ÆŸPlÙ²%&Ož,u¡) …h×®ùŽ¢4KKK¡°°ï(Œ‚Ö¯_CCC|öÙg|GaÞ NNNˆÅĉ1kÖ, >wïÞå;Ãh¼üü|øùù¡_¿~¨¬¬ÄÍ›7±zõjhiiñMmþÂ_p…+Æc<ö`웆aÕ122¦M›GGGÌ;ÖÖÖX¹r¥ÌM¿ üÙÒÒÒðí·ß¢cÇŽ˜7oÆŒƒøøxlܸQ#xáììŒû÷ïÃÇÇ‹/F—.]pàÀÉ Ñ ••///XYYA[[ Î›ŽMLL¤>ëééÉÜTž-¹ ¼&yeò(2¿¢ùåQ¦e—µšÜòœœˆÅbI¦ê ¤¤¤Hå033S¨Íšj[ŽvíÚ!;;›s^àÕ¥Ž;†’’$%%!99cÆŒip¶ºÚS4·‘‘‘äÿ[´h!·¬1˜Âu»?~ýû÷ÇܹsajjŠaÆaûöí2n­os­GÙ¼Õ4e=75Ø¿?:wîŒÿüç?˜3gîÝ»'''¾£É•––†ÿþ÷¿X²d‰d;7W………(**Òøë· £éªï3eB˜æcÀ௿øN¡VÚÚÚ˜?>bcc1tèP¸ºº¢wïÞ8tèûbÍ0ÿ§ªª aaa8p &Mš„ž={"&&~~~Mæâ±‰‰ òóóùŽÁ0Ìkìa+¸‚JTò…7úúúðòòÂýû÷…>}úà‹/¾@›6m`ooíÛ·ãÉ“'|Çdæ óâÅ „……áÓO?…••ÆŽ‹””ìØ±?Ɔ `iiÉwLÎ>ùäDGGãÖ­[|GQ‰Î;#>>žïJ«>ÍÞÒ´<{ö ?ýô–-[VçÅT†QSSSøûû#** •••èÝ»7F°°0¾£1ŒÆyöìV®\‰·Þz ‡Æ?ü€«W¯J=å²9º†kp áˆ#8-4s— Ã0Œò¬­­ñÓO?áÑ£GX¸p!öíÛ+++É9Vy7Z2 ÓüàСC?~<¬­­€Ù³g#!!»wïFÇŽùŽ(ÅÀÀ7nD||û ]ºtÁæÍ›Ùµn‰D¸zõ*JKKADJߨmff†gϞɔçää¨|~UäW¦e—•Ký¹¹¹’L5JJJ$ÓµnÝZ©ã²™™™ÜAŸééé 4iÓ¦ † ‚   øûûÃÏÏOmo6PeîÆÄu»cË–-HLLDRRæÎ‹   L›6Mª¾úÖ9×z”ÍË4L^^~øá¼ýöÛðõõ…³³3°nÝ:¾^°mÛ6˜˜˜ÀÝÝï(jW}Í­]»v<'a˜¦­ú†òÞøÌ„0M“£# <}Êwµ³°°À¡C‡póæM¼ýöÛøøãѹsgìØ±ÅÅÅ|Çc^aûöíèÔ©\]]amm¿ÿþaaa°²²â;žB”=©À0ŒzŒÁd#7pƒï(¡oß¾@jj*aii‰o¾ùÖÖÖ°³³Ãºuëpÿþ}¾c2 ÓLeggãçŸÆ„ кuk¸ººâÁƒX²d xZž¢ìííѽ{wìß¿Ÿï(*ѹsgÄÅÅñCiÕBRSSyNÂ(bÆ 066†ßQ˜7XïÞ½qõêU„„„àÅ‹øàƒп;v ••oî`s†€þù3fÌ@ÇŽ±ÿ~,[¶ III˜?~³âmÜÆXŒÅ(ŒB‚  m¾#1 Ã0<°°°ÀÒ¥K‘˜˜ˆàà`˜››cÙ²eèСÜÜÜðÛo¿¡´´”ï˜ Ã¨Ñ‹/püøq¸ºº¢mÛ¶øôÓO¡££ƒ£GJvÓ¡C¾cÖ©cǎسgbcc1f̬Zµ VVVðõõm6½iˆÈÈH¬Zµ b±ÚÚ¯úûÊÞàíè舓'OÊ”_¼xQåó+’¿¶A ʬe—µ>NNN¸|ù²Lù•+W¤N0bÄDDDHMs÷î]tïÞ]ª¬¶uààà€™ò'NÀÁÁAáÜ3gÎÄ®]» OOO…ççJÕ¹ ×í*ðôÿî/433Ô)Spúôi\¸pAfÞºÖ9×zjÛ?¸æe%yˆÜêÕ«1~üx<|ø?ýô“Æß?VXXˆ={ö`Á‚Ð××ç;ŽÚUaoaåx5PQ)!**Š¢¢¢”©‚a¦´”ÈÐ( €ï$.))‰üüüH$QË–-ÉÃÃ~ÿýwª¨¨à;èUee%]¹r…|||¨eË–¤¯¯O«ò¶óøöÑGѤI“ÔVÀøw’aTÅ–léKú’ïëåË—tåÊòóó£öíÛ²°° 
É“'Ó¶mÛ(**ŠªªªøŽÉ0LTXXHááá´téRêÛ·/µhÑ‚ôõõÉÁÁ¶mÛFiii|GT©M›6‘±±1óEi›6m"+++µ·£îþzUUÒÞ½{ÕÖF5Ö_W´´4 …´sçN¾£0Œ”[·n‘‡‡ikkS»víÈÏÏ®\¹Âw,†i4999@C† !Ô«W/  /^ð­ÑÜ¡;dJ¦äHŽTJ¥|Ça†aþ×ïbêþþ™ŸŸO¿üò 7Ž´µµI(’ƒƒmذ݇Á0ÍDbb"ÐäÉ“ÉÈȈZ´hAC† ¡mÛ¶QVVßñ”VPP@Û·o§.]ºzçwhÆ ôôéS¾£5*'''òöö¦ääd*++£øøxòôô$y·åÕv«Þë剉‰Ô¡CÚ¿?effRNNS¯^½j­£¡ó+’ßÊÊŠ"##©¼¼œÎŸ?/9¬HÊd•·®j+«–œœL={ö¤ãÇSvv6RXXYZZÒÉ“'%ÓEGGS§N(<<œŠŠŠèîݻԧOÚ½{7§uG–––´wï^ÊÈÈ ÌÌLÚ·oYZZR||<ç¼ÕÊÊÊÈÔÔ”–/_^ï´õÕ[W{ÊæVt{44ïëe\·+rrr¢˜˜*--¥gϞї_~I®®®2mԵιÖSÛþÁ5/×寭ìMðäÉZ¿~=uïÞP×®]éǤÂÂB¾£)dÆ dddDyyy|GiÛ·o'333¾c0ŒÚ4Öý¦'Nœ TVV&ó;6 „iº>øàÕÏ*;;›¶mÛFýúõ#Ô¡CZ¶lÅÄÄðaTêÞ½{´téR²²²"dggGÛ·o§œœµµÙ˜Ç7___>|¸Úêg7˜1LÃͧùÔƒzð£I¨¬¬¤k×®ÑÚµkÉÑÑ‘D" víÚÑ´iÓÈßߟ>žÞ½{ãîîN@@Ó§OgÚ´i˜››«·ÃÜÿýdggsìØ1µ£Ü‘+W®0xð`"""ðóók·¯Óãõ¥K—’ÍÁƒÛík€Œ×ÛBzz:C† aÆ ¬X±Bí8Bü¦ØØX¾øâ þóŸÿ““ƒ——AAALŸ>I“&¡§§§vD!nÙÕ«W9|ø0aaa8p€ªª*¦L™Â’%KX¸p!ýúõS;b‡;ÉIæ0‡»¸‹oø†¾ôU;’Bˆfnö½˜Zë!´ó«aaa„……qôèQêëëqqqÁßߟ€€üýýenUˆN@£Ñ¡œ9{ö,£Fbúôé=ò½^MM ß}÷_ý5ûöí£¸¸î½÷^æÌ™ÃèÑ£éÝ»·Ú1…èôvíÚž}ûضm›ÚQz ùžwœ>}šƒ²wï^Ξ=‹™™,X°€9sætéõï¾û.ï¼ó—/_ÆÊÊJí8bܸqŒ;–7ªEˆvÑQó7ndýúõh4šëîÓmׯ,D{ „+àèQ˜3Gí4ªòððà7Þàõ×_çĉ„††²ÿ~¶nÝŠ±±13fÌàž{îaæÌ™ØÙÙ©WˆëdddpèÐ!öïßÏáÇ)++cèС±uëVüüüºí„ö€(((P;†¢“™Œ &ìg?Ïò¬Úqº¥àã…^ ¾¾žèèhNŸ>Mtt4?þø#[¶l¡¡¡sss|}}=z4¾¾¾Œ1gggtttÔ~Bˆ;”››Ë… ”Ÿþ™äädìííñõõåÁdôèÑøùùabb¢rbõ,_¾œY³f§§§Úqn›««+¶¶¶;v¬] B:‚··7?üðƒÚ1ÄMxçw°²²bÙ²ejGâ¦x{{³nÝ:Þzë-~üñGöíÛGHHëׯgÀ€Ìš5‹{iÓ¦õ˜“¢ë©ªªâĉ8p€ýû÷“€‰‰ 3gÎäÃ?$((ˆ¨S5Ç9N Ànvc€Ú‘„Bt1ÍçWW¯^M~~>ÇŽ#""‚“'O²k×.jjj°´´düøñøùù1~üxFމ‘‘‘Úñ…è¶JKK‰‰‰!22’'NI^^}úôaôèÑLž<™?ýéOLœ8±G‡ûôéÃüùó™?>µµµ9r„={öðÉ'ŸðÆo0pà@fΜɜ9s˜>}:ÖÖÖjG¢SéÕ«QQQ¬_¿žO>ùDí8=‚|Ï;‡¬¬,e³‘Ç“ŸŸ££#÷ÜsëׯgòäÉÝ¢À²¸¸˜¿ýío¼ôÒK=fþ·¡¡ØØXüqµ£Ñå•””Üp]…t]›Üu|ü±ÚI:¥””:¤ –ÊË˱±±QvÝ•c„Z´;¥„……N\\ÊÏæ¼yópwwW-_G¾¾íÞ½›û￟òòr Û¡Û‘ì8,Äy€È!‡£U;J·SWWÇ¥K—ˆŽŽVvÏJLL¤¡¡}}}Œ··7^^^ʵ‡‡‡ŠÑ K\\\‹ë¬¬,lllZt=z4666*§î\quueáÂ…üíoS;Îy衇(..æÀíö5:b¼þÝwßqÏ=÷P\\Ü®ÅJ2^¿3iii 2„-[¶°|ùrµãqG’““ #44”C‡Q[[{ÝÐÞÞÞjÇ=Tyy9'OžTv=§ºº bâĉèëë«UuG9JALa !„Ї®»[¥Btg½CÈo©¯¯çܹs„‡‡ÍñãÇIIIZÎÃhçUå|°·N£Ñ(óœÚ ohllÄÚÚZÙä* €€€ ¤øf$''ʾ}û8~ü8555²†EˆkôêÕ 
===žyæ>øàµãôò=WǵkÇâããÑÑÑaìØ±JGeŸn÷šðꫯòÏþ“+W®`ff¦vœqêÔ)ÆŽKll,^^^jÇ¢]tÔüÁK/½Ddd$‘‘‘×Ý'!¢kûÛßàí·A£¾Òrý×TVVɱcÇøé§Ÿ8uêUUUØÚÚ2iÒ$Ƈ¯¯/#Gޤ_¿~jÇÝHEE111DGG+?ƒCCCÆŽˤI“˜8q"ãÆ£o'ù=îÈ×·sçÎ1räÈvôÊ3!îÌ>öq/÷r‰K f°Úqº½òòrâãã¹xñ"ñññÊÂòÔÔTššš000ÀÃÃ///<==quuU.=y·-!:Buu5W®\Q.—.]RNˆj»™››·(âòòòbèС²ÃÛMzûí·Ù°a]úò?ÿùOV­ZEQQººíÓ˜¶#Æë;;;˜6mZ»}¯ß™'žx‚°°0e²èVÊÊÊ'<<œcÇŽqúôijjj°··Wæñ|||9rd§™KÝGCCƒR¼úôiŽ?ÎùóçihhÀÓÓ“ &0aÂ&Nœˆ£££Úq;•ïøŽ…,dóø‚/Ð¥}ÆBB!î\W/iMZZ111ÄÄÄpîÜ9bbb¸zõ*MMM˜››3räHFŒ§§'nnnxzzbii©vl!T—““CBB‰‰‰ÄÇÇ+¿?EEEôêÕ å÷gĈŒ9RÆÁm¤¼¼\)8×¾÷­ªªÂÆÆ† &À„ 6l˜l&„]\CCçÏŸçøñã?~œððp²³³144d̘1Lœ8QÙ§;¯[ÌËËÃÕÕ•×_—_~Yí8fݺulذ¬¬¬nWà#„VGÍ<ðÀÔ×׳{÷îëî“‚Ñµåæ‚½=|ú),^¬vš.¥¦¦†Ó§OsìØ1Ž?NTTEEEèèèàáá¡ìháëëˈ#ºõ`K´æÅÍwJihhÀÜÜœ±cÇÀ¤I“¸ë®»:킎|}«¨¨ÀÈȈ={ö0wîÜ6?¾,0âÎÔSÏ ±Œe¼Å[jÇé±´…"±±±JÁÈ¥K—HMM¥®®SSS\]]qqqiQ(âêꊽ½=½{÷VùYÑù¶(úh~ÉÌÌTgmm­,hÞÉG ?îLvv6ŽŽŽ|þùç<ôÐCjǹmñññxyyŘ1cÚåktÔxÝÅÅ…¥K—òú믷Û×ñúíKMMÅÍÍ?þ˜¥K—ªGˆvU]]Í©S§8vìáááœ:uªÅ<ž¯¯/>>>Êf/ýû÷W;²è"ˆçÌ™3DGGsæÌbbb(//G__ŸáÇãïﯜ”—E£7¶›Ý,bKXÂV¶Òy*„Yw,iMii)çÎkqIHH ¬¬ øeNÕÝÝwww<<>>lÚ´éºû¤ Dt}óçCI üø£ÚIº>¾ÝºÑÑ™Ýê<Þˆ#022R9µPK}}=iiiÊ<^rr2±±±ÄÄÄPQQžžC† i1—çëëÛ£NL߉­låižf+øéEטBˆž¬§„ÜHQQ‘2h>6Оç033kunÈÅÅGGGy&:íÏuk—ÔÔTåg[û~ÉÅÅEyÏäâ₳³s—9—ÝS466rñâEes„¨¨(RRR€_þGŨQ£”"gggu !D•œœ¬œ={–³gÏ’ €³³³²yðĉñööî±çäÏ;‡¯¯/Ÿ|òIÚ䪢¢KKK>üðC–-[¦v!ÚMGÍ8;;³bÅŠVÏÃKAˆèúBCaî\HJWWµÓt;W¯^%::Z™Œ'11‘ÚÚZz÷î““^^^¸»»+»r»¸¸0hРUÁÛÕÖÖ’’’Òb²,11‘¸¸8RRRhllD__wwwewhooo|||ºüdKG¿¾-Z´ˆÒÒRöíÛ×æÇ–fBܹK\Âr™ÌT;ޏ999\¹r…äädÒÓÓÉÈÈ ==´´4222(((Pkhhˆ££#vvv888`ccƒµµ5ØÚÚbii‰•• Pñ q½ÊÊJ²³³ÉÎÎ&77—¬¬,rrrÈÉÉ!##ƒ´´4233¯ûy4hvvvØÛÛ3hÐ ìííqrrÂÕÕUNê«è»ï¾#00„„ÜÜÜÔŽsÛ–.]J||•‚.ÆÊÊ +++üüüZ½¿ªªŠÔÔT233ÉÈÈ 55•ŒŒ 233‰‰‰!77—¼¼<åDüÒmÄÒÒkkk¬¬¬°´´T Fˆ¹¹9 P.&&&õtE7QUUEAA………Êu^^¹¹¹äææ¢ÑhZÜ®¨¨hñùæææÊϦ££#Æ ÃÞÞ±··—¦NlÖ¬Y899±yóæVÛ¼v=ô³gÏæòåË b+ÔŽ$„BÜ1†ÎðáÃ[½?;;»E‘HJJ ©©©œ>}F£œ'†_Î[YYakk«Œ+­­­±³³ÃÂÂ333eŽUºÑöL%%%ää䟟O~~>yyydff*EYYYdff’››«t÷€_æ?mllpppÀÝÝ]™OÓ^¬­­U|V¢½™™™1mÚ4¦M›¦|¬¶¶–¤¤$¥ƒfdd$›7o¦ºº===ZtÐôööÆÃÃ~ýú©øL„¢óÒþ]mÞIN;¯XYY‰®®.nnnøúú2wî\|}}5j”ü]ýÛ·oçøñãDFFö¸uaÛ·ogöìÙR "D((( ºº[[ÛVï—U¼¢ëÓÕ…G…O>W_ÙÍ®Ýéêêâî»ûu÷]ÛŠõêÕ«$''sôèQÒÓÓ©­­U;`ÀlllpttTŠDlmm±²²bàÀÊE¬Ýš‚‚òòòÈÏϧ  €œœ4 
h4ÒÓÓÑh4*Ÿ£¯¯ƒƒƒ²;ÊìÙ³[,055UñuÓ¦MãÅ_äÌ™3Œ=Zí8BˆV<Á<É“¤“ŽjÇmÄÐÐPÙ!ùFššš”Âk»0h¯£££ÉÍÍ%??¿Eñü2nÒ‰ÜèÚÌÌ ###Œ•kLLLzÜ„PwQVVFii©r]ZZJqq1eeeJ·Žªª*òóó[~h?~-sss,--±´´ÄÆÆ___,,,”èVVVØØØ`aa!;Üuq:::<û쳬Y³†·Þz«Ë.¾6m–––ìÚµ‹W_}Uí8·møðáXZZrèÐ!)éD’’’رcÛ¶m“ „¸ ::: <ccc²³³Ñh4œ;w===‚‚‚˜m!ꀔ¹<é4qk ”EpÍÂeff*syYYYäåå)Ÿ£««‹­­­2w7a„ţáö²—ì ˜`µ# !„ÂÚÚkkkÆ×êýUUUh4²²²®ëæpñâEöïßOvvv‹óÅðËyJíXR;ví¹bSSSLMM111QnËÌ_|¡v!º…ÌÌL)ÝÜóÏÆ ðé§ðôÓj§éÑÌÌÌðõõmµõQSS“Rœ ÝÉN{R3--ÈÈH233)))iñy:::-&ý,,,°°°ÀØØ333Œ¯»˜˜˜(÷uµ õõõÊ¢Á’’eaóKQQ¥¥¥äåå)ÅÚKóRLLL”ÝmmmñõõÅÖÖ{{{åc²SŠº†Š­­-”‚!:©E,âu^çïüø@í8¢õêÕKé42tèÐß||ii)………Ja测ý ÉÉÉ!>>^ùwQQMMM­¯ÿþ)…"¦¦¦Êx§ÿþôë×¾}ûÒ§OLLLÐÓÓÃØØ éß¿?zzz˜™™¡§§Gÿþý144”“f@cc#%%%ÔÔÔPYYIyy9uuuSWWGYYUUUTWWSVV¦ÜW[[Kyy9ÅÅÅÊØ¬yHó¯edd„ŽŽÅÅŘ˜˜0hÐ †ŠŸŸß¯ IaPϲ|ùrÞ|óM>ýôS^|ñEµãÜ]]].\È—_~Ù¥ Bz÷îÍŒ38pà«V­R;Žø?kÖ¬ÁÍÍx@í(Btzìß¿Ÿ¾ÿþ{tuu™6mÿþ÷¿™7oÞoîÄ\TT¤Ìáeee)›ŒdffráÂ¥HúÚ±¬‰‰Éu‹øÌÍͯ›¿Ó.èkþ±®¸“`mmm‹9»æEÁÍ‹ƒKJJ®›Çkm.ÏÈȨEÑ͈#”›ï®-cDuSÌ\æržó♨v$!„¢Ó044ÄÕÕWWWššš8þ<‡æÂ… üüóÏTUUáééÉ”)S?~<ÞÞÞ-:Däçç“““C^^©©©ÊÇ Zýz­Šhoзo_LLLèÓ§2×j``€‘‘ýû÷ÇÀÀ@‡ö”…µµµTTTPZZJuu5åå唕•)ó åååTWWSZZJEE555×{4¿]]]}Ý×èÕ«æææÊ¹ýâììÌØ±c[œó·´´Tî—9kÑ–ôõõñññÁÇǧÅÇëêê¸|ù2qqq$&&Ç‘#Gøè£”nÜ–––xxxàìì¬Ýk ðmllÔx:BqÛ4² Ló aÉÍÍ~ÙF»‰âòåËñððÀÓÓ“ÁƒËÆ/m`íÚµTUUñöÛo«¥Ã}ôÑG˜˜˜0wî\µ£Ñ-\¹r…Þ½{3hРVï—‚Ñ=X[òe°n,_=d²¦«éÕ«—²s̵o¼›«©©Q&÷´;r_»K^BBB‹ÂˆÒÒÒëvÑêÓ§²Pòf®¯ejjJ¯^½nê9655Q\\|ÝÇ+**”ɵÚÚZ*++•Ň­]·F___99®-v8p ÞÞÞ-N°_¼x‘ÿûß³lÙ2^}õUìíío*¿PG¯^½ b×®]üùÏV;Ž¢zèñ"/ò'þÄ«¼ŠjG”öµÚÉÉé–>¯¼¼\)((++SN¨5/4Ðh‹B¯^½Jyy¹2~())¡®®ŽÒÒÒ›úšÚ¢ E‘Hó“ŸFFFÊîëÍ;–üÖøÈÌÌì–žk´ã¦ÑpÀ/…8Ú…tÅÅÅÊ¢Dmq†¶ø~)¾-++»© Ú±1zzzÊÉã~ýúajjŠƒƒC‹î.FFFÊ8­y!vÁc¯^½¨ªª",,Œ¾ýö[Ο?——ÁÁÁÌž=OOÏÛþž‰îÁØØ˜Gy„M›6ñüóÏw¹w­‡zˆ>úˆ‹/ÞTA]g5gÎ{ì1JJJºlÇ–î$..ޝ¾úŠ;vÈBh!n ??Ÿï¾ûŽ<ˆžžÓ¦Mã“O>aþüùÝô±ÌÌÌ033ûÕ¿ã­v¸hÞ½6;;›¸¸¸¿6ÓŽ›´ãÏk¯µÀ´¶Œ¾¾þ-•hçë®URRBcc£2ÎÔ^—••)ãÉæ×­éÓ§ÏuÅ/dèСÊN×Í7¿ÑÎíÉN×W )DEç8Céºc!„¢=äääpìØ1ÂÂÂøî»ïÈÈÈ`àÀL™2… 60kÖ¬.\ù-ÚâƒÖ ®ýXrr2%%%TWW+…UUU­Žû®¥ÕŽ7å<2üÿyÏæç•mƒÂæó«·ª®®ŽòòòVïÓŽQç—µó¡ÚóÎðÿçNov^ôÚB¥ÈÆÁÁ¡C‡Þ°§ùµ‘žžžžž×ÍÃ755‘––FBBñññ\ºt‰«W¯Ijjªò;f``p]‘Hó‘þýû«ñ´„=Xyyy‹Bk‹?´…›}úôÁÉÉ ggg† Æ‚ ðôôÄÃÃGGÇ›^'nÍéÓ§ùàƒزe 
T;N‡ª¬¬dË–-<ûì³Êš!ÄILLÄÑÑñ†¿S½šn´íMˆŽŽhµ€.=†>ú¥8Dô8ÚJJKK•‰¿ÒÒR*++[d\[€Q]]­ìþ\UUÕâ˜Í'ÓšÓžèmm¯µI?íbBíõµ…(Ú…—Ú]»[Û%ñVvE©­­eçά]»–ôôtxàÖ¬Yƒ««ëM£'Sãõ-<<œ &pîÜ9†ÞfÇݺu+O>ùd›Oˆž¬’Jœpâ)žâMÞT;Ž¿J;q[ ‡ IDAT®¹¶³EEEÅuªpãâ íB8h½¸¢57ZT×\EEÅo.ÔÓv4¹‘æ'bo¦ˆE{²¶W¯^Ê"CSSSeÑ ö¦¦¦èééÝÒbÉÛÕÐÐ@dd$!!!ìÚµ‹ììl\\\ $88™€í¡’’’ððð`÷îÝÌ›7Oí8·¥±±æÏŸÏ´mw­Ž¯çççcmmÍ®]»X°`A›_Æë·æ 66–óçÏKAˆÍddd°{÷nBBBˆŒŒÄÀÀ€©S§Ì‚ :å‚í¦*Úî¸ÚBhm¡tCCƒ2½ÑuóB`­m¶R]]ÝêÜš¶ÐäZÚ1å®›¥hçò®ífÜSv˜î)"ˆ` °Á†oùGÕŽ$„âÜì{1Yñë*++9qâaaa„……qæÌttt;v,AAALŸ>Q£Fuª÷o¿Ö£®®Nk6/ÆÐÞ×|^T{nhu\ 7ÞHPK;ûk;oßhcíÜ&´ÜøG;Ú|nUÛEZ;öÕÞwm·CCÙ¢«ill$33óºÅÖÚ묬,å±–––8;;3hÐ lmmqttÄÖÖ{{{ìíí±±±‘÷ŠBˆ›V[[«tíÍÈÈ@£ÑžžNff&iii$''“——§<ÞÖÖöºîFÚk[[ÛN5&ë jjj=z4–––„……õ¸s¾[¶láå—_&%%+++µãÑî:bþ`éÒ¥dggsðàÁVï—‚ѽ,_GBBÜæNBt'uuuìØ±ƒ·Þz‹ÔÔT|ðA^ýu¬v´NM×·¦¦&† Âìٳټys›W˜ Ѷֲ–l$…Œ#Bˆ¶ÓØØÈ‰'Ø·oÿûßÿ¸rå ƒ bîܹãçç'µ=LPPùùùDFFªå¶ýõ¯å­·Þ"==½Mw†ìèñºŸŸîîî|öÙgm~l¯ß¼ØØX†NHHH»çÑÕ¤¥¥±gÏBBB8qâ&&&̘1ƒÀÀ@.\xK]2„¿n';YÆ2&3™ìÄcµ# !„¸CRr{9{ö¬RrüøqjjjpqqaúôéLŸ>Ù³gKQ¢G©®®¾®P$-- FCZZÙÙÙÊ&\½zõÂÊÊ ;;;ìììpppP F´·”DBˆî«²²Rù[‘‘‘q]ÁGff&999JÑ©®®.VVVJ¡™££ãu…·²Ñ°h¯¼ò ›7oæüù󸸸¨§CUTTàááÁܹsÛt œYGÌŒ?ž1cưqãÆVï—‚ѽ\¾ °m,Z¤v!:ÚÚZ¶mÛÆ;ï¼CFF<ò¯¾újpÞ,µ^ß>üðC^yåRSSÛ¬U ,0¢mRˆN¼Æk¬b•Úq„ÝXll,!!!|õÕW$$$`aaÁìÙ³ föìÙ¿ºs è¢££¹ë®»8xð 3gÎT;Îm)--ÅÁÁ5kÖðÒK/µÙq;z¼þ÷¿ÿ7ß|“œœœ6?¡"ãõ›·páB’’’ˆ‰‰‘9Ñc¥¦¦òÍ7ß(E ¦¦¦Jg±™3g*ÝÓ„m£‰&ÞäMÖ²–çxŽø€ÞÈkBtRró®^½JXX‡æ‡~ °°f̘ÁŒ3˜>}:ÖÖÖjÇBˆN«¡¡ììlÒÓÓ[,ü¾|ù2—/_F£ÑP\\¬À/Ïmll°°°ÀÆÆKKK,,,°¶¶nqÛÊÊJŠG„èD***ÈÉÉ!''‡¼¼F BD÷““îîðüóÐF ª…ènêëëÙ¹s'ëׯ'66–ÀÀ@^yåüüüÔŽÖ)¨ùú¶~ýzÖ®]Kbb"öööw|Ö­[‡¿¿?þþþ¬^½šÀÀ@Yd£’•+W²uëVV¯^ÍöíÛÕŽ#„hE_úò6o󱂌`„Ú‘„=ˆ““/¼ð/¼ðyyy8p€–/_Îï~÷; äÁÄÊÊJí¸â7ŽY³f±fÍš.[âììÌܹsyï½÷¸ï¾ûºìûŒ|U«VQQQ!»èu°3gÎðí·ßÚe~„¸Í;‚EDD0pà@æÌ™ÃêÕ«¥Dˆôß±„%8à@Q8ã¬v$!„¢MµÖÄÐÐzè!¦OŸN@@jGBˆN©¶¶–¤¤$¥è#66–¸¸8®^½JSSÆÆÆ 2///¦OŸ®¸¸¸´É×ïÓ§¶¶¶ØÚÚâííý«mll¤   E‡ƒ¢¢¢ëþ]PP@RRR‹ûZ«wïÞ˜™™abb‚±±1FFFôïß###LMM122jñ1333åvÿþý166ÆÔÔ”þýûK·SqSjkk)//§¸¸˜ÒÒRÊËË)++£¼¼œ¢¢"åvYYeee·øXii)%%%ÑØØØâØ:::˜››·èžcnnŽ‹‹ £Gnñ±æ0`½{÷Vé;"º‚ÿýïüãÿ`ûöí=®àý÷ßç§Ÿ~"""Bæ³…hc§NbРA¿ZL,BD÷T_>>`k ªFˆ.!<<œõë׳ÿ~† Æïÿ{-ZÔ#wyPûõ-44”{ï½—={ö0oÞ¼;:–ì8,Dûh¢‰ L@}~äGµã!ùùùìÝ»—={öFCC“'OfÞ¼yÌ;·M: 
uh»„ìß¿Ÿ»ï¾[í8·åÂ… Œ9’;vpÿý÷ßññÔ¯çååaooÏÇÌÒ¥KÛì¸2^ÿmäääpêÔ))ÝNll,!!!„„„‡……³gÏ&88˜9sæôÈ9!ÔÒDïñ¯ò*‹XÄÇ|L_úªK!D;éIBš€:tˆ””¥$ @ @„âÊÊÊHLL$>>žØØXå’’’BSSýúõÃÓÓ“¡C‡âííͰaÃðòòÂÁÁAíèm¢¤¤¤EáHaa!ÅÅÅ·´8ÿFKûô郑‘ÆÆÆ˜˜˜ ££ƒ©©)zzzôïß éß¿?zzz˜˜˜ ««‹‰‰‰òCCC ”Ç˜šš¢££ƒ‰‰ ½zõÂÔÔ´ƒ¿c=‹öÿ·¤¤„úúzJJJ¨««£¼¼œªª*ª««)//§®®Žââb())¡¶¶–ŠŠ å1eeeÔ××S\\L}}½RÄQ^^Þjw@ùÿ½Ù"$SSS¥˜C[ØabbÒÁß1Ѥ¥¥1jÔ(|ðA¶lÙ¢vœwæÌÆÏ›o¾É+¯¼¢v!:\{ÏÌ;—>}ú°k×®>F BD÷uü8Lšß~ j§¢Ë8sæ ëÖ­c÷îݸºº²zõj/^Ü£ZAw†×·eË–±ÿ~Ο?G»{Ë3!ÚO4ÑŒa !„°€jÇBEii)û÷ïgÏž=8p€ŠŠ FÍüùó™;w.^^^jG·(88˜˜˜.^¼ØeÇå>ú(Ç'!!áŽw€Sk¼þÀ‘‘ADDD›SÆë¿.::š»îº‹0kÖ,µãÑ&´E _}õ 8880þ|‚ƒƒñóó“]…PA,f1G8ÂzÖó/¨I!D;ëÎ!ÉÉÉ„‡‡¡€ôíÛ—Q£FIˆB´";;›øøx”ë„„ÒÓÓ_Š<<<ðòòbذax{{ãíí³³³¼‡ÿ ååå×ukh^8¢ýw}}=EEEÔ××SVVFuu5UUUTTTP[[Û¢è@[Pp³š‡´Õí122úÍÍ=næ1ZÚïÇ>¦¨¨H¹Ý¼PçNnßŒÖ zôõõéׯ}ûöU ƒtuu133CWW###LLLZz\Û•¦ÿþ7AˆŽRWWǤI“(//'** CCCµ#u¨ÌÌLƇ‡‡ßÿ½¼>Š©½çìììX¹r%«V­ºác¤ Dto>QQRÝ+Ä-INNfãÆlݺcccžzê)ž}öY¨v´v×^ßJKKñññÁÖÖ–°°°Û^4' Ì„h_ðG9ÊyÎcŠì4#„è|ª«« '44”²²²pvv&((Hžv!éééxzzòÆoðòË/«ç¶dddàææÆ{ï½Ç³Ï>{GÇRk¼ÆŒ3¸páC‡m“cÊxý×Í™3‡’’Nœ8¡v!dÇŽ\ºt GGGæÍ›Gpp0þþþÒýFá ÷qõÔ³‹]ŒcœÚ‘„Bt€îRÒÔÔD||<üôÓO=z”ÌÌLúö틟Ÿ“'OfòäÉÜu×]w¼9ƒBtu†¸¸8bcc‰‹‹#99™ .““€‰‰ ƒÆÅÅ///¼½½ñòòÂÃÕӋk]Ûa¢yAIó"‰›¹ÝÐÐ@iiéMßnMcc#%%%¿šùfs-“ß<‡ó[166V~†oõvó–ÖnkÛ¼ CÛáEˆžä¥—^â_ÿúÑÑѸ¹¹©§C•••1qâDjjjˆˆˆÀÌÌLíHB¨¢=ç233±··çÈ‘#Lž<ù†“žó¢{ûðC1–/‡µÓÑ¥¸¸¸°qãF^}õU>úè#6oÞ̺uë¸ÿþûyå•WdgévfllÌ7ß|ƒŸŸÏ>û,[·nU;’¢ÙÈp†óÏñ_¨G!®c``ÀôéÓ™>}:|ðgÏž%44”¯¾úŠM›61pà@æÌ™Cpp03gÎì²Ý'º;^~ùeÖ®]Ë¢E‹°µµU;Ò-³··çé§ŸfíÚµ<òÈ#«é–M›6ÁƒóÉ'Ÿ°aõãt{‘‘‘˜BgǦ»7±ŽšãõuëÖñÞ{žN¿~ýîøx2^¿± 6P[[Ëþðµ£qSšššHKKãüùóDEE‘——Ç€1b¾¾¾¸ººv»ù!ºªRÃR>Ÿô9qöqÜsæî9{½šä÷S!z’ŠŠ ^|ñÅß|œÚB²³³9}ú4„‡‡óóÏ?SSSƒ ¾¾¾àïïϘ1c¤ˆ¢GÉÏÏ'>>ž„„ˆ‹‹#11‘ÔÔTÑÓÓS:}xxxàé鉇‡2'„¢ÍDGG3qâDž}öYÖ¯_¯vœUZZÊ=÷ÜÃ¥K—8tè#FŒP;’ªjÏùƒçž{ŽÈÈH~þùç_}Ü„Ñe¼ü2üóŸ îîj§¢KklldÿþýlÚ´‰°°0FŽÉSO=Å#<‚Úñº¥ÿüç?<þøã¼óÎ;¬^½Zí8BˆV¬`»ØÅyÎc½Úq„â–¥¥¥qðàABCCùþûïéÝ»7&L 00ààà.Ù‘¢; áàøñãøûû«ç¶lÙ²…•+Wõ›»˜tF………8::²~ýzžyæµãt[üfëc!ÔÖØØÈ‰' a÷îÝdffââ⢼~¨QqCb)KÑGŸ/ø‚ LP;’BˆN¬# Bjjj8{ö,QQQœ:uŠˆˆRSSÑÑÑaøðáøûûãç燿¿¿lØ)„芊ŠHNN¾îrñâE²³³èÓ§®®®x{{+]?´… ²á‘Bˆö¤Ñh3f C‡eÿþýèè訩ÃäääDFF?üðžžžjGBuí9àååEPPÐožIAˆèêê àÄ 
R„h§OŸæý÷ßg÷îÝX[[óÜsÏñøã3`Àµ£u;›6mbåÊ•¼õÖ[üéOR;ŽâTàƒƒÄ÷|O/dgQ!D×UXXÈ?ü@hh({ö졲²’Q£Fȃ>ˆ‡‡‡Ú{´9sæpùòeΞ=KÿþýÕŽsË™4iåååœ>}]]]µ#ݲ§žzŠï¿ÿž¤¤¤5Áß‘¦L™B}}=ÇW;Š×i^BVV^^^¤ÚîÑBˆ_WM5«Y͇|È}ÜÇV¶bŠ©Ú±„Btrí¹ #))‰S§NETT111ÔÖÖbnnΘ1c7nþþþŒ;Vv³Bt[EEE$%%qùòe’’’”ËåË—),,@__ggg† ¢\Œ»»»È !„PEUU“&M¢¬¬Œ“'Obbb¢v¤ÍüùóÑ××çàÁƒ È?þHTT®®®jGê0Û¶mcÅŠ°sçNÌÍÍÕŽ$D§Ñ^!Û¶mã‰'ž °°~ýúýêc¥ Dô,;w¢E°y3<ý´Úi„èvÊËËùïÿË–-[8þ<¾¾¾<ùä“,Y²DZ²¶‘–,YB`` Ÿþùo¾Ð !:Ö»¼Ëë¼Î0éjÇBˆ6¥] »oß>vïÞÍåË—qttdöìÙ2{ölôôôÔŽÙ#ìÙ³‡… ²wï^‚‚‚ÔŽs[Þ|óMÞ{ï=Ο?ß%'ËçÍ›G~~>ááájGév&OžŒ¾¾>‡R;Šèáš|õÕWäää(E Ò1Kˆ®¡žzÞç}Ö°†QŒâK¾Ä•®7îB¡žÛYÐQ\\ÌÅ‹‰ŽŽV.ñññ455accƒ¯¯/øûûãëë+çÏ„ÝBaa!W¯^mµÓG^^ðKч““ƒ¾®ÛÇ Aƒºd'a!„=ÏÊ•+ùè£8xð S¦LQ;N‡(((àé§Ÿ&$$„—^z‰õë×K±¦×h¯‚G}”´´4Ž9ò›•‚Ñóüå/°f ìÞ sçªFˆn+::š7²sçNÌÌÌxì±Çxê©§4hÚѺ¼cÇŽqß}÷akkË7ß|ƒ“““Ú‘„ÿ§‰&³˜ Š(†0DíHBÑ.ššš8sæ {öìaïÞ½\¼x‘HPP³fÍ¢ÿþjÇìÖ}ôQ8Àùóç±¶¶V;Î-«­­ÅÇLJòÃ?t¹‰ãˆˆ8rä“'OV;N·qøðafΜɱcǘ0a‚ÚqDÔ¼dçÎäææ*E ‹-ÂÍÍMíˆBˆ›M4ËXÆe.óoñ/ C×o!„Pßo-èÐh4DGGsæÌå’‘‘€““>>>øøøàë닯¯/–]!ÚRUUYYY­vø¸rå ÅÅÅèêêâèèx]§¼½½100Pù™!„·ï/ù ¯¿þ:;vìàþûïW;N‡Ø»w/O=õ:::|úé§Ì˜1CíHBtJíQR__ «V­âøÃo>^ BDÏôÔS°m9cƨFˆn-;;›Ï?ÿœÍ›7£Ñh˜:u*Ï?ÿ<Òòú¤¦¦2oÞ<222øüóϹûî»ÕŽ$„ø?ÕT3‰I”PÂINbŠ©Ú‘„¢Ý]¾|™={öðí·ß‰žžS¦LáÞ{ï%((;;;µ#v;¥¥¥Œ1‚áÇóÍ7ßtɱõ… ;v,øÃxã7ÔŽs˦N À?þ¨r’îcâĉôëרEô 555:tˆ¾ýö[JJJ”"‡~˜!C¤È[ˆ®¤šjÖ±Žwx‡±Œå>Áwµc !„袴 :† F\\.\àÂ… œ;wŽóçÏ“ €«««Rø¡-0`€šÑ…â–“’’Bjj*)))\½z•””åRRR¢<ÖÆÆ''§ë.ÎÎÎ899Ii!„ÝÒ¶mÛXºt)|ð/¼ð‚ÚqÚ]||<+W®äðáÃ,Y²„7bj*k_„¸‘ö(Ñn¤wùòe\]»óµ„ˆž©®áÂ8yÕN$D·W[[Ëîݻٲe xyyñ»ßýŽÅ‹cnn®v¼.©¢¢‚§Ÿ~š/¾ø‚_|‘wß}}}}µc !€,²Ã¼ñf?ûeR!DRPPÀ?þHhh({÷´///‚‚‚ Äßß¿K/tFÇŽcêÔ©¬[·Ž—_~Yí8·eË–-<ÿüó:tˆiÓ¦©ç–h»„üôÓOLœ8Qí8]ÞÁƒ™3g'OždìØ±jÇÝ\uu5‡&$$„½{÷R^^ΨQ£ dÉ’%75±.„è|"ˆ`9ËÑ a-kyŽçèMoµc !„èbÒÒÒ”Â#GŽpåÊÒÒÒ¨««C__///† ÆðáÕâY%„è쪫«Ñh4­vøÐh4dee)533SºzØØØ`kk«üÛÝÝ]:C !„èq¾ýö[.\ÈŸÿügÖ¬Y£vœv•™™É»ï¾ËÖ­[:t(›6m" @íXBtzíQò»ßýŽŸþY9ö‰nr{ IDATo‘‚Ñs•”€öÅêÀ°·W7=HLL ÿøÇ?عs'µµµÌŸ?ŸÇœ©S§Ò»·œ¤½UÛ¶mã™gžÁÝÝO?ý”áÇ«Iœâ“˜Ä<Á&6©G!TQ]]Mxx8¡¡¡|ýõ×dddàèèÈìÙ³ dæÌ™ôéÓGí˜]Ú_ÿúWþøÇ?²oß>fÏž­vœÛ²páB"""ˆ‰‰ÁÚÚZí8·dòäÉèêê¦v”.oܸqXXXªvÑMUUUFHHß|ó Œ?žàà`î»ï>éf%D–GæÏ|Â'ÌaÿäŸØ#óýB!~]zz:ñññÄÇÇG||<.\ 
¸¸€Aƒáè舮®.ÇÇÑÑ[[[ttdó!DçRWWGaa!………äççSPPÐâvnn.ÊãMLL°´´Ä ,--8p –––XZZJ‡!„¢™˜˜þú׿2uêTüqµã´›‚‚öîÝË?ü€±±1÷ÝwS§N•Mþ„¸IÉÉÉ̘1Ÿ69^CC¶¶¶¬\¹’?þñ7õ9R"z¶ôt¸ûn((€ÿýüüÔN$DR]]Mhh([·nå‡~ÀÎÎŽ‡~˜+Vàää¤v¼.åÒ¥K,]º”Ÿþ™—_~™×^{ CCCµc Ñã…Â"ñ,ÏòwþN/äͲ¢g‹eß¾}„††râÄ ™:u*AAAÌ;+++µ#v9MMM,Z´ˆ°°0N:…³³³Ú‘nYQQ>>>xzz²ÿþ.5¹|ôèQ¦L™ÂÑ£G™4i’Úqº¬}ûöÄ©S§¸ë®»ÔŽ#º‘æE {ö졲²R)¹ÿþû±±±Q;¢âÔQÇf6³–µô¥/å¯,b‘Ú±„Bt" ¤¤¤(Úâ„„JKK°´´ÄËË †ΰaÃ6l&&&444(B5ÔÔÔ••EVVééé×ÝÎÌÌ$//Oy¼¾¾>ÖÖÖØÚÚboo­­-8::âèèÈ Aƒ000Pñ !„]ÇçŸÎþðî¿ÿ~6nÜØ-79މ‰áã?æë¯¿ÆÒÒ’gžy†Ç{L6ôâ6˜˜˜´Ù߉#GŽ0uêT.]ºÄ!Cnês¤ Dˆòrx䨻~Ö¯9,D‡KLLä³Ï>ã³Ï>#??Ÿ©S§òä“O2wî\ôõõÕŽ×%455ñ¯ý‹U«VallÌk¯½Æ²eËÐÕÕU;š=Ú.v±„%Èç|N¤•¶B¤¥¥qðàABCC9|ø0õõõŒ7Ž   î½÷^<==ÕŽØeTTT0qâD*++‰ˆˆÀÜÜ\íH·ìäÉ“Lš4‰U«VñöÛo«ç–Ìš5‹¢¢"¢¢¢ºT1Kg2fÌìììØ³gÚQD7PYYÉÿcïÎ㢺ïý¿dßY‡}APÄ}oPq#à’H¶fiÓĤɽI·é/mÚ^MÚ>Úæö6MÚ&51·MkŒ!q%®Ä%*’(Š (Ⱦð3Ì0ðûcœ#(&jÄòyÎãû8gæÌ Ÿ1Çœ™ïûûùüóÏIMMeóæÍèt:fΜIJJ >øà°ëD$„ØNvò_ü%”ðc~ÌOù©œo !Äf0(++#''‡ÜÜ\e{áÂeU|www¢££‰‰‰Q¶111B¨F¯×S__OUUEEETVV^³_RR‚ÑhÀÚÚOOOüüü C£Ñ\³,Œ„Bˆo©¦¦†çž{Ž­[·òË_þ’µk×ÞUßÿhµZ6mÚÄúõë9qâS¦Lá…^àÁ” ˆCÄOøà}ôQµË¹agΜaÒ¤I|øá‡<øàƒj—3ìlÛ¶U«VqòäIâââÔ.G SMMMlß¾´´4vîÜÙ/òÐCI*!î"G9ÊZÖò9Ÿ“DæÏ„¦vYB!îöövòòò¸pá999\¸pÜÜ\ 1 XZZBtt4ãÆcܸqÄÄÄ…³³³Úå !F½^Oee%ååå”––RQQ¡ì———S^^NMM æ)[ÖÖÖh4 Äßß_Ù __ß»rer!„b¨èêêâÍ7ßä׿þ5îîîüãÿ >>^í²n‹ÎÎNöî݈ رcÜwß}üð‡?döìÙj—'„裩© þô§?±fÍš~œB„諳ÞxþügS0dÕ*øÿ€yóÔ®Lˆ©²²’ýë_¬[·Ž¢¢"¢££IIIá‰'ž 44Tíò†¼‚‚Ö®]˦M› äG?ú?øÁpr’Õ…PCe<Áá¯ð ?å§Ø!m¹…âjF£‘cÇŽ‘––ÆöíÛ9þ<$&&’œœÌÒ¥KeÇuœ;w޹sç²xñb6nÜ8,W|饗øë_ÿÊþýû™5k–ÚåܰÇœ#GŽpþüùaÆQKoo/“'O&<<œO>ùDírÄ0£ÕjÙ±c©©©ìÝ»£Ñ¨„@~øa¼½½Õ.QqeÁZÖ’N: XÀ«¼Êæ¨]–BˆA¢Õj•.EEEÊ~qq1===XYY¤tú #::šI“&áèè¨vùBˆ»\gg'UUUvô¨³˜º]¯«GXXAAA² ŽB¡½^Ï?þñ^{í5xá…øùÏ>ì¿lhh`÷îÝlÙ²…Ý»wÓÙÙÉܹsyâ‰'X½z5...j—(„ÀŸÿüg~ñ‹_PQQqS¿§b z=lÛëÖAz:ÁÊ•’sçª]#NOO‡bÆ |úé§´´´pÏ=÷ðÈ#pÿý÷ãîî®v‰CZQQo¼ñï¿ÿ>ÖÖÖ<öØc¬Y³†ñãÇ«]š#N=¼É›ü’_âƒÿËÿ²œåj—%„CZQQ;vì --ƒbeeÅܹsIJJbõêÕøKgÇ~:IJeËx衇X¿~ý°kaÝÓÓêU«øòË/ùꫯ R»¤RRRBTT¿ûÝïxñÅÕ.gØøôÓOIIIáÔ©SLœ8QírÄ0ÐØØHZZš±°° !!””V¬X«««Ú% !n³Ãæ·ü–Ýìfóx•W‰'^í²„BÜäçç“——G^^çÏŸW®·µµ¦ Ô‘‘‘Œ7ŽÈÈHÆŽ˸qãÇÚÚZåW 
„¸ÛtwwS]]MYYUUU”——SQQAee¥r[YYÊcPº{øùùáïïO@@~~~Ig!„bˆºpáï½÷|ðmmm¬Y³†Ÿüä'Ãö»Ç®®.Ž?ÎÞ½{Ù³gYYYXXXÏ}÷ÝÇŠ+Ðh4j—)„ø&L`öìÙüýï¿©ÇI Dˆo’ 7¦MPRÑѦÎ!‰‰0c ÃW…κººØ»w/©©©lÞ¼™îîn-ZDJJ ÷ß¿¬üô5y÷Ýwyï½÷(((`öìÙ<õÔSÜÿý’úâ« ‚óc6±‰éLg-kI$Qí²„bÈ«­­U:‡ìÛ·NÇÔ©SINN&11‘I“& »Ä`صk+W®ä™gžáÍ7ßT»œ›ÖÚÚÊœ9s5j‡ÂÍÍMí’nÈÏ~ö3ÞyçòóóñòòR»œ!¯··—I“&ÅG}¤v9bkhhà³Ï>#55•={ö`ii©„@V®\)ç³BÜ…ŒÙÌfþÈù’/™ÃÖ²–E,R»4!„·@«ÕöëòaÞÏËËSVÏ×h4ý:}˜÷ÃÂÂT®^q·¸º«Ç@Ý=JKKéîîVãîî®tñ¨³‡F£A£ÑÈç‘B!Ä0ÒÙÙÉ'Ÿ|»ï¾ËáÇ åÉ'Ÿäé§ŸÆÇÇGíònJee%ÇŽ###ƒÌÌL²²²èêê"44”Å‹³xñb.\( ) 1Œ|ñÅÜsÏ=?~œ©S§ÞÔc%"Äêí…cÇ࣠- .]‚Ñ£añbX¶Ì´õõU»J!F”ŽŽ>ûì3>øàöìÙƒ••III<öØc,]ºTV‡ºŽÞÞ^öïßϺuëØºu+–––$%%ñÈ#°lÙ2lmmÕ.Qˆã gø5¿æ>!–X~ÎÏYÍjF!_!Ä7éììäóÏ?gûöíìܹ“ŠŠ üüüHLL$11‘E‹áää¤v™ªùôÓOy衇xî¹çxã7†ÝÓ%%%Ì™3‡öîÝ‹ƒƒƒÚ%}£ŽŽÆÇ’%KX·nÚå y›6mâ‘G!;;› &¨]ŽbêëëÙ¹s'©©©ìÞ½kkk.\HJJ «V­ÂÙÙYí…ƒ 6>äCþ—ÿå"I$‘y‘Ô.M!Ä7hiiáâÅ‹\¼x‘üü|eäååÑÒÒ€««+‘‘‘DEEÅØ±c‰ŠŠ"""•_b¸êêꢦ¦†òòrª««©¨¨ ªªŠŠŠŠ~Ý=̇ìììúuò¨»‡F£‘¿MB!Ä]¢««‹ôôt¶nÝʧŸ~J[[+V¬à©§žbÑ¢Eâ“Wcc#gÏžåôéÓdff’‘‘AII –––Œ?žÙ³g3kÖ,fÏžMxx¸Úå !nQbb"­­­>|ø¦+!nUQ¤§ÃްotuAXÌ™sçÂ’%¬v•BŒÕÕÕlÚ´‰7òå—_âååÅòåËY¹r% ØÙÙ©]â¤Õjùä“OøðÃùâ‹/puueùòå¬ZµŠÅ‹coo¯v‰BŒ'8Á«¼JiLa /ð)¤`‡üíBˆ•““CZZéééÐîëâŋ̛7 &––6,‚Ë~ø!=öÇŽcúôéj—3dõôôÇ„ ذaƒÚåˆ!¢®®Ž]»v ¹ï¾ûFtÈOˆ»]>ù¼Ïû¼Ç{´ÑÆ<À+¼BQj—&„¢®®.%ôa~˜·ÕÕÕXYYJDD‘‘‘ÊˆŠŠÂWÖBܽ^Omm-eeeJàãêàGuu5uuuýçííAAAh4š~ómžžž*½*!„BÜ)ÍÍÍìÚµ‹-[¶°k×.ÚÚÚ˜:uªò½ÙPíÒÞÞNnn.gÏž%''‡³gÏrîÜ9ªªªððð`ƌ̚5‹Y³f1}útY@Iˆ»Ä©S§˜2e ;wîdéÒ¥7ýx „q;´·ÃáÃðÅpè?DD˜"3fÀÌ™0~M||<óçÏçã?òõ÷ööÁ`àÈ‘#Ã.„s§|øá‡<þøãœ;wލ(™ì;’•••±yófRSS9vìvvv,X°€””î¿ÿ~Õ.Q1HôèÙÂÞáqˆPBYÞâ)<ðP»>ž•+W²bÅ Ô.NGAA*û”––ÒÓÓƒ½½=ÑÑÑL˜0˜˜bcc‰‰‰Áßß_íò…ƒä¾û¸˜¬¬,FuÓ—@ˆƒ¡½22L‘Œ S@¤µaÊS8dÆ ˜<BBÔ®Vˆ»ZßF÷íÛGww73gÎ$99™û™d=êêj¶mÛÆ–-[8pàÝÝÝÄÅűxñb/^Ìœ9s¤M²ƒ¨Š*Þç}Þå]J)e! y†gXÎrlß=!„¸F£‘ììl%ròäIìíí™={6IIIÜÿýCæàÁ²gÏî¿ÿ~âããÙ´iÓ°ûbüàÁƒ,[¶ŒG}”uëÖÝÒ`wÒ™3g˜:u*o¾ù&Ï>û¬Úå 9F£‘ñãÇ3}útþùϪ]ŽPAii)[¶l!55•ŒŒ \]]•nN÷Þ{/j—(„DyäñOþÉû¼O=õÜ˽ü²˜ÅX AJ!„¸ŒF#eeeý&åç瓟ŸOQQƒ0­²Áرc;v,ÊЇâjÝÝÝÔÖÖ*;***ú<Ì·×ÔÔÐwšÒèÑ£•N¾¾¾øûû_sÝ××wÄ,î"„BˆWTTÄþýûÙ¿? 
ºº777.\¨ê¢Á”––RZZJYY™²_\\LAAåååÊû!ÆŒCxx8cÆŒ!22’ØØXÂÂÂdÑ1!FsçÎ˧Ÿ~ʪU«né9$"ÄRTGŽ@V= ÙÙ`4‚‹ L˜` ŠL™11¦ë2ÑZˆÛÎÜpëÖ­ìÚµ‹––¦L™ÂŠ+X¾|9±±±C~r™:::ÈÈÈ ==ôôt²²²ppp`öìÙ$$$ÀäÉ“åßNˆAÐCûÙÏ:Ö±…-8áD2ɤÂR–bµÚ% !İSRRž={HOOg÷îÝ´¶¶Mrr2 ÄÇÇù.·âøñã$%%LZZÞÞÞj—tSvìØÁêÕ«yüñÇùûßÿ>ä?ÿéOÊÛo¿Mnn®¬Öt•>ø€'Ÿ|’œœ"##Õ.GÜ!%%%lݺU ¸¹¹‘””DJJ K–,‘„¸Ë5ÒÈ&6ñI&ð}¾ÏÓýôS***ðòò">>ž„„’’’ðóóS»Ì!éÒ¥KìÝ»—½{÷²ÿ~ššš bñâÅ,\¸¹sçÞõ«l ¡†2Êø˜ùˆ8Á ¼ðb5«y™Ç>ž™3gJ@díííœ:uŠ£GräÈ222hllÄÉɉ‰'2wî\æÌ™Ã¼yóäK(!n“\rÙÄ&¶³l²qÄ‘…,$‰$IĵKBˆaI«Õ²gÏ>ûì3vïÞM}}=¡¡¡$&&rï½÷?lW:ìêêâÙgŸå_ÿú¿ùÍoxùå—Õ.馘C! |ôÑGßêƒÿÁöýïŸÝ»wsæÌ¼¼¼Ô.GUƒ¨¨(.\ȺuëÔ.GÜF999¤¦¦’––FVVžžž,[¶Œ””–-[&“…„¸Ë1rƒ|ÌÇlf3õÔ3i#4ö³ŸN:™Ä$Id˘Ît¬ yBq³ŒF#_}õŸ}ö;wî$;;;;;î¹ç–,YÂÒ¥K‰ŠŠR»Ì›öúë¯ó³ŸýŒx€uëÖáää¤vI7,##ƒÄÄD¦L™ÂæÍ›¯ùP~¨hnnfâĉÄÅűuëVµËQÕ»ï¾ËóÏ?O^^žœ;ÝÌ!?þ˜óçÏ@bb"IIIbС#t¶±íl§–Z&1‰yx€Päï¼B˜õööö w˜‡ùzEE…ÒåÃÍÍÐÐPe„……õ»>”ÃðBˆ›s£!ÊÊJZZZú=ÖÙÙ???¼¼¼”‡——___|}}•°‡¬X-„B5•””››Kvv6§N";;›ÂÂBzzzpwwgÒ¤IÄÅÅǤI“ˆŠŠº¡Ï–kkkû…=.]ºDAA°7^ž7éää„F£ÁÓÓOOO\\\pqqÁÉÉ º»»imm¥££ƒ®®.šššÐëõ´µµõ èõzÚÛÛoË¿‰»»;8::âìì¬FÌïãÌ]oooå6 ’qwª¨¨`üøñ<õÔS¼þúëßúù$"Äݨ³Ó ),4‚‚+ûeepùƒe¼½!<Ü4ÆŒé¿?ÂW.â딕•qäÈ¥Mavv6ƒ___%2sæL¦N*oÊP^^®D2339sæ ]]]8::§D$$"Ä·£CÇްƒlc%”àˆ#³˜EÂåË$&a…Ú¥ !İSQQÁîݻٳgéééhµZ‚ƒƒY²d K–,aáÂ…C6 pµ½{÷òè£âááAjj*ãÇW»¤vîÜ9qqqa×®]ª]Ò€Ž9B||<ï½÷ßûÞ÷Ô.GƒÈÈH–.]Êßþö7µË·Èùè£ÈËË#00U«V‘’’ÂìÙ³±°÷•BÜÍi$4¶±=ì¡ÖÒíÔ IDAT“N¦1•¬ä~î'‚o·z™B g:ŽÊÊÊ~«ÎšG^^mmmØØØ töè;4 ~~~*¿!Ä·ÑÕÕECCZ­–ªª**++ûí÷½­ººš¾SuìììpwwÇÏÏFÓo¿ïmÃæ3'!„BŒ•••äææ’““£lÏœ9£t/Óh4ÄÆÆ2fÌBCC•Ž­­­tvv¢ÓéhnnF¯×ÓÚÚJKK 555Ê{§††ZZZhoo§££C ÕXXXô»~=£FÂÍÍ [[[pvvÆÆÆWWWììì°··ÇÅÅ\\\°··ÇÎÎnÀÛ\]]±°°P®ØÚÚÒÕÕÕïgš'}kojjRöÍÇZ[[©¬¬¤¦¦†ššêëëû=“““ø ¾f!€€™[%Ä0”˜˜Haa!§Nº-sL%"ÄH£×Ã¥Kýƒ"æí¥K¦ã..W"áá AAbÚJÛX!dee‘™™©„ªªª°²²RºˆLŸ>É“'KÀaÝÝÝäåå)­ û¶ƒ´±±a̘1ýB"Ó§OÇÆÆFí²…vŠ("ýò娧gœ™Á % 2™ÉŒb”Ú¥ !İb4ÉÎÎ&==ôôt:DOOqqq$$$””4ä'JWTTðÐCqâÄ Þ|óMž~úiµKºa•••$&&RWWÇgŸ}F\\œÚ% 襗^âïÿ;§Nb̘1j—sǽóÎ;¼øâ‹äçç¬v9â&˜C 7n$??Ÿ   V®\IJJ sæÌaÔ(yï(ÄÝ,—\v^¾æ0–X²€¬`ËYŽÚ% !Ä¡×ë)//0ðažämæîî>`à#,,Œàà`ù~@ˆa¦©©‰ššêêꨫ«Sºx˜;yÔÖÖ*·777÷{ìõ:y˜WzöòòR&öÙÛÛ«ô …B!èƸ^· NGQQÕÕÕ”——S]]M}}=Z­–îîn¬­­±³³Ã‚Q£Fa4éèèP:v|¾Í C¿ÛœœpqqÁÝÝooo<== ÀÏÏ{{{ÜÜܰ±±ÁÉÉ 
GGGlllpwwÇÆÆGGGœœœ†Uç4½^Omm-UUUJH¤²²’ÚÚZŠ‹‹•Î(€éß(((¨_—ɰ°0"""ˆŠŠ’÷šB Aëׯç™gžá‹/¾`öìÙ·å9%"„¸¢§ÇÔAäê®"……PZ Wîëæf †›F` éº94âë Cx“ƒ­¤¤„cÇŽ‘™™Iff&§NB¯×ãàà@ll,“&MbòäÉLš4‰ &HÀá*ƒsçÎõ ‰ôí$2~üxbcc?~¼²ïéé©vÙB +}"ûØGMøàÃ4¦1—¹ÌaÓ˜†-¶j—*„ÃJCCû÷ï'==]»vQVV†§§'óçÏ'!!{オ˼Fww7?ÿùÏyýõ×yâ‰'xóÍ7q& hµZV­ZÅéÓ§Ù²e ñññj—t®®.¦OŸŽ³³3‡Q“Àôz=‘‘‘$%%ñÖ[o©]ޏæȆ ((( $$„åË—KDˆ vö³Ÿìd»(¡OLWWÑÑÑ$''“Àw¾ó!NKKãÉ'ŸÄÙÙ™>ø€9sæ¨]Ò éêêâñÇgëÖ­üå/’]Nrrr˜:u*¯¾ú*?ùÉOÔ.çŽùË_þÂO~ò.^¼H@@€Úåˆôôô‘‘AZZŸ|ò ………„††’œœ,!!îr=ôpšÓà»ÙÍ|“™Ì2–‘H"Ó˜†%#'È(„¸{UWWSZZJiié5eJJJèêê®LFh„……áêêªò+B\­³³­VKUU•••_»_SSCOOòX[[[F»»;~~~h4ÜÝݯ¹îççG@@Àú G!„êjmm¥»»­V‹Á` ­­M Xô=ÖÝÝMkkëM3?ç71wÐpppÀÖÖö»e8::¢Óé”ðHcc£­¨¨PºN8;;θq㈈ˆ 22’ˆˆÂÂÂÐét”””P^^NEE…²_^^NYYÕÕÕÊ{.kkküüüúu°è»õóóÔÿN#•Á`   €óçÏsáÂe{áÂåÿ+///ÆGTT111ÄÆÆ2qâDÜÝeNˆƒE§Ó1oÞ<ºººøê«¯°³³»mÏ-!ÄíÓÝm Š”–Bq±©ÛHi©i””˜Fß7ªžž¦H` ©£H@€)0âïo øø€L:w)£ÑH~~>§Nâäɓʶ©© KKK"""”pCLL ãÇ'<<+++µKRªªª8wîgÏžU¶¹¹¹ttt`aaAXX±±±ÄÄÄ0a¢££‰ˆˆí…ø½ô’Gdp„#ã¸À(F1ŽqÌd&Ó˜ÆT¦2‘‰X3|Ú« !„š:::ÈÈÈ ==;v››‹££#³fÍ"))‰+V¢v™ÔÖÖòôÓO“––ÆüÇðú믋÷N½½½üáà•W^á©§žâ/ùËkþûßÿžµk×òå—_2qâDµËt:ŽˆˆV¯^ÍŸþô'µË}˜C ©©©|òÉ'TVVFRR)))Ì;Wí…ƒ$‡\¾ä 4â YH"‰,e)>ø¨]¦Bܽ^Oyy9•••TUU)=Ì×óòòúM¤rww¿¦»‡y„„„`aa¡â«bdëé顾¾¾ß0w먯¯§¡¡ººº~Ý= C¿çðòòR†¯¯ï€=ÌdžKwT!„BܘÞÞ^ššš® h´´´ÐÝÝ}ÓÇZ[[1 »®®®XYYáêꊭ­-JH£ï±¾a ó1kkk\\\°··ÇÎÎN¹Ÿ³³s¿ðÇ@‹ù´¶¶*xs÷ˆ¾û}ñcÆŒ!""‚ˆˆÆŽ‹¯¯/666èt:åüªï¹Vii)Ýæ£¹r~Õ·“bßý   ™ç4Ä”––’——Çùóç• ÈÙ³g•yƒ‚‚ˆU"±±±DDD`i)‹Æñm=ýôÓ|òÉ'|õÕWDDDÜÖç–@ˆâÎjl¼))1u)/7m+*L£¹ùÊý­­Ma‘À@S@Äßßñó3HÌiÁ-î"—.]âäÉ“dgg“““ÃÙ³g)**¢§§[[[¢££‰ŽŽfüøñJX$$$DVl½Jee%YYYäææ’““Cnn.çÎSNl5 111„……­ì‡……©\¹CS=õd\¾|É—œä$-´`‹-™È´Ë—©Leã°@¾8BˆoRPPÀž={ؽ{7 ½½ÈÈH/^Ì¢E‹ˆWmbBoo/ëÖ­ãÇ?þ1cÇŽåƒ>`üøñªÔr³ÒÒÒøîw¿Kll,Ÿ~ú)ÞÞÞj—¤0ÌŸ?Ÿææf¾üòËÛºêËPôÆoð³ŸýŒÂÂBYåk0;vŒÔÔTRSS©ªª"::š””xࢣ£Õ.Q1Š(âG8ÊQv±‹2ÊpÄ‘YÌ"áòe“äN1¤iµÚkÂ}'%+«ÏÚÚÚâïïÍ$$óõÐÐPé*-ÄÔÙÙI}}=uuuÔÖÖ*!†††~A¾ãê),£GÆËË OOOeô vô zxzzÊ„C!„bÒëõ´··ÓÑÑAWW--- š››Ö7¼qõ±ÁÒÞÞNII %%%ý:!šÏ•êêê”ûšÏ‰BCCñööÆÅÅ¥;Iß•••èõzF…¯¯/H`` ²„F£‘÷^w‘ŠŠ Μ9ÃéÓ§9}ú4gΜ!??Ÿîînìí퉉‰aâĉLœ8‘)S¦'ç×BÜ„÷Þ{5kÖ°mÛ6’““oûóK D1ôtt˜B#ÕÕ¦mß°HßÉå7 8;_ÛaÄ××44ðò2sqQïu ñ-tvv*ÁsH$77—’’ÀÔªqܸqL˜0˜˜¢££‰ŒŒ$((HV3룫«‹¼¼ 
BˆëéêêâÈ‘#ìÙ³‡}ûöqúôi¬¬¬˜9s&‹-bÑ¢EL›6펯¾sñâEžxâ ²²²xùå—ùùÏŽ­­í­áVœ9s†•+W°eË–!ÕãÒ¥KLž<™Õ«Wóî»ïª]ΠÑét„‡‡óðÃó?ÿó?j—3bõ |üñÇTWW+!|qãÆ©]¢â6«¦šÃ&tö²—bŠqÀÙÌfs˜Ë\¾Ãw°aèwÿBŒ ßÔÝ#??ŸÖÖVåþ­>Û÷zhh¨,ž$Ä êììD«ÕRUUEee%Z­V×»íjîîîh4ÜÝÝ•áçç×ï6óu///ùnF!„Díííèõzš››éîîîÐøºcæÆ»vvvØÛÛãä䄵µ5nnnX[[ãìì<à1s@ã뎙} ‡ó…êêjÊÊÊ”°Gqqq¿ð‡¹“€››Fy/eoo••ÝÝÝtttPSSCUUeeeý:«™ß“ ÔÕC£Ñ‚£,Ð<âét:rrr”€È™3gÈÎÎF«ÕbeeEtt4S§NUÆÄ‰5 %ÄpuàÀ–-[ÆO~ò^{íµAùB _55¦ÐHy9TVö”•™Ž×ÕÁåU¡°³S8ÄËËññooÓmÞÞW‚$ò¦V ---\¼xQé€aÞ`ccC@@@¿ÑÑÑÄÆÆâ"©~ÊÊÊ® ŠäççSZZ €µµ5aaaDEE®Œ1cÆ,«0pŽsJ@ä$'9Ç9ôè±ÃŽb”€ÈÄËäo‘B ¤¾¾žžžÎž={())ÁÉɉ™3g’@BBS¦L¹#µôôôðÞ{ïñÒK/áããúuë˜?þùÙßF}}=<ð™™™¼õÖ[üà?P»$EZZË—/ç½÷ÞãÉ'ŸT»œAñÇ?þ‘µk×RXXˆÚåŒ(}C ›6m¢¦¦F <üðÃDFFª]¢â6ºÄ%2Èà‡8À (À;f2“ùÌg ˜Ît €!T3Pw¾×êîquØC&$ 1xÌ¿£_ì0_ohhPVŒ6³³³»n¨c Û|}}e!3!„âk˜C:ŽÎÎN%”ÑÚÚªtÆèéé¡©©Iéðp+éêꢣ£ã†jrvvÆÊÊ www¬¬¬ú4nõ˜9 Ñ÷ØHQWWGee%eeeJ8¾´´”ŠŠ %ð¡Óé°°°`ôèÑŒ='''lllèííUþû655Q__ßïùÍA___üüü”­øúúˆ½½½/_Ü%ŠŠŠ8qâÇçĉœ³˜…=òy¥bðéõzêëëû…=ú>.^¼HKK‹rÿoêî"Å…øÌÝ;n´ƒGMMÈ2³³³»¡`‡»»;²ø˜Bˆ»žÑh¤¥¥ƒÁ@[[›º0wÁ0‡.š››1455)¹:¨q#¹QnnnXXX(Ý0œ±±±ÁÑÑ{{{ìììpttÄÆÆ,--qssÃÒÒWWW¬­­qrrRî;PxÃ|Lܘžžjjj® y˜ƒ•••”——+a\\\pppÀ‚žžt:]]]ýº€@ÿŽæ÷eæ}óVÞŸ µôôôŸŸÏ‰'”qêÔ):::°··'..®_'‘¨¨(9ÿ#B~~>óæÍcÒ¤Ilß¾}P;èH D!úÒjMÁ‘ª*ÓV«½²ß÷¶êjèûçÓÎÜÝMÃÏϹÞupsSï5ŠÍh4réÒ%òóó9þ¼t¸pá555€i¶ÐÐÐkB ááá„„„`kk«ò«:),,T"}÷ÍmÐG…¿¿ÿ€ÿ–!!!x{{«ü*„PG)¥ýB"Ùds‰KôÒ‹=öDMÌåË&M4Á«]¶B F£‘ììl% rèÐ!ŒF#“&MRº‡Ì›7oÐÞ·ýûßÿæ¿þë¿°°°àw¿ûO<ñÄo±þùçŸóÝï~777RSS™0a‚Ú%ÑÓÓÃÒ¥KÉÏÏ'++ ~Çu:666Cþñçž{Žõë×óüóÏóòË/ãããÃïÿ{~ýë_SXX(ïw‘N§cß¾}¤¦¦²}ûvš››•È£>ʘ1cÔ.Qñ-URIYå(G8BYèÐáƒÓ˜Æ¦0—¹ÌaŽ@„·]GGeeeʤ¥ŠŠ eRSUUeeeTWWcþªÙÎÎŽÀÀ@‚‚‚”LPPr»L&âÆ™'666ÒÐÐ@}}=µµµÔ××+£®®®ßm]]]ýžÃÁÁOOO¼¼¼ðööÆÃÃOOO<==ñööÆËËK¹nB!Äp Õj¯ j˜;^˜Cmmm š››éééð1_Ô0?æF™_Ô0wÇ05nä1× wˆ;G§ÓÑØØØ/\kÞ¯ªªêw¾ÔÝÝ­<ÎÎÎ;;;¥3‚^¯§³³ƒÁ ÜÇÆÆFéÜáíí¿¿?>>>JgFƒF£ÁÇÇGþ»‹aÇh4’››Û/$rúôiºººprrbòäÉýB"cÆŒòß7 q3 HHH@£Ñžž>è]o%"„·B§3u©ª‚º:Ó0w"©¯7šš+Çú¤»S÷OOS§OOÓðò2u"ñòºr›ù¸´@w@SS“én(,,¤®®0µ£ ìףèêꢢ¢¢ßÊxæŽ-ùùùʶ¶¶øûû+«á]½B^hh¨œðˆ£vrÉåçÈ!GÙ–S€ .ÄÃxÆC ãGQ¤råB¡®¶¶6233•€HVVÌž=[ ˆLž<ù¶¾§hjjbíÚµüõ¯eÒ¤I¼õÖ[Ìœ9ó¶=ÿ`(//硇âÔ©SüùÏæ©§žR»$jkk™û,6l`Íš5üæ7¿Q»¼»NßȶmÛhkkcÖ¬Y$''“’’BXX˜Ú% !nQ7Ýœæ´ü8Â.q K,‰$R ~La ÑD3 
ù¬@qëêë멬¬¤¬¬L™¸d˜·}'¿ÙØØàç燿¿?øùù)AsØÃ××WÅW$ÄÐöM;}Wfvvv×íÖq½Û„Bˆ;A«ÕÒÛÛKSS===477_ÔèììD§Ó)A––ŒF#Z­öº¹^¸ãF888`kk«„.\]]ûuÉpqq¹&¨q+15ÄðÒÛÛKmm-µµµTVVRSSCuu5UUUTWWSRRBuu5555tttô{¬ò0 ýæã^^^øúú*!ܾ×Í!___ äŠÇ`0pöìYŽ?®„Drrr0 ¸¹¹1uêT¦M›ÆŒ3˜1c†|Ö †­³gϲxñbÙ½{7£GôŸ)!„¸ÚÚ® Œ˜¯×Õ™®×Õ™B$õõpÕɦ`ˆiëáa£G_Ù7‡HÌ×Ôy­â®ÔÒÒÒ/ RXXHQQ………”••a4ðòò"<<œ°°0¥Fpp°²•î"&ƒºº:ªªª® ŒQZZúµ‘¾¡ Œˆ‘ ‰¦~!‘\r9ËYê0…Õq$Š("‰$šh"‰$Š(Æ2äX!ÄÈSTTľ}ûØ·oû÷ïG«ÕâççÇ¢E‹X¸p! .¼m“B²³³ùÏÿüO222xúé§yíµ×ðòòº-Ï=º»»ùÅ/~Á믿ÎòåËy÷ÝwUÿÂ%33“{_ýêWüô§?eÓ¦M<öØctwwãããCyyù^ùËÏÏOéŽ`mm À÷¿ÿ}^}õU|||Ô*í®ÑÙÙIzz:©©©lݺ•öövfÍšEJJ )))2ÉKˆa¨‡òÉç$'É"‹L2É"‹.ºð‹™Ìd³˜Íl¦2Gd±!ÄÓjµ)«Õš·æÛÊÊÊhmmUîogg§L$7/RcÞ7oƒƒƒ‡ô{R!î”––©¯¯ï×½ãzÛ†††W···gôèÑxxx(Û¾ãêc£GÆÓÓS~…B|#½^O{{»¨0wÀ0oÍûæ E{{»ò½^¯/Ì2Ì[sxÃÈhjjR 7:õÐÖÖ%táä䄵µ5®®®XXXàî.¬­­qrrúÚ Æ7=FŒL×ëæQZZJii)eeeÔÔÔPWW§ÌsÓâ¨æ÷ZÝÝÝ×ümccƒ··7xzz^Æí{]£ÑÈ!n‚N§#;;[ ˆ?~œ .ÐÓÓCpp03fÌ`æÌ™LŸ>É“'Ëßx1äeffrï½÷ËöíÛqvv¾#?W!B1µ·›‚!ÕÕW$æÀHCÃÀãêUìíûG¼¼®\hxz‚››:¯W kz½žâââk#%%%ÓÒÒÀ¨Q£Ðh4×EÌûÁÁÁØÙÙ©üj††o Œ”””(N ô…mß/mÃÃÃq“ßmq—j¤‘ó—/yäqžó\àÅcĈ%–„ªt‰$’pÂÃüñ—u…#‚ÑhäĉìÛ·ôôtŽ;†^¯'**Š °`Áâããñðð¸åŸÑÛÛˆ xùå—ikkãå—_æG?úC8¤~ìØ1}ôQÚÚÚX¿~=IIIªÖóÆoðÒK/ñì³Ïò·¿ý @ùÂi×®],]ºTÍò®Ë`0`gg7àª|ÖÖÖXZZòâ‹/òÿþßÿ»#+ßÜM:::øüóÏIMMeË–-tvv2sæLRRRxàÐh4j—(„¸AÝts Jøã$'É&›6Ú°ÆšñŒgæåË,fA„Ú% !†(NGeeåuƒæINæ…fe2Òõaaa¸»»«øª„P‡y¢à7uéè;‘°¡¡½^Ís}]׎ë u !ÄÈp³¡Š¾]6®î¶a4iiié÷|­­­JG s¨ãF™Cæ­yß¼0w½0oÍá s üuqqÁÒÒR h¸¹¹1jÔ¨ƒBÜ £ÑH}}½2êêê”`‡¹³GMM Z­–¦¦&ºººú=Þ‚ÞÞÞkxxxàåå…F£!00ooo<==ñööÆÇÇGéìáåå%!î°ææf¾úê+233ùꫯøòË/©««ÃÚÚš‰'*Df̘ÁرcÕ.WÅÖ­[yì±Ç˜?>üñ )!„¸[46š‚#Fêê®ì›ï£ÓõKËëFF¾2ÜÝûo]\ÔyÍbXÐjµS\\LII —.]Rö‹‹‹innVîÛ70B``  “È.ÓÏ3| IDATëõ”••Q\\Üo%‹òòrÊÊÊ())é÷Ÿ»»;@@@AAAÊ¿k@@€tow•.ºÈ# —/æÀH>ù´cúݰÎ1—/æˆy?ˆ ,‘Õö„w§ŽŽ2228räGåСC ÂÂÂHHH !!Å‹ãêêzKÏýÖ[oñÛßþgggþû¿ÿ›üàCvÓ¦¦&žþy6nÜÈóÏ?Ïþð‡Wú÷¿ÿÍ;ï¼ÃÎ;q¤sƒÁÀ¬Y³8yòd¿/¦¬¬¬¸÷Þ{Ùºuë üÜo«  €ˆˆ¯Ÿ¸lii‰››ÕÕÕXYYݡʆ§¾!Í›7£Óé”ȃ>(­Ñ…ºé&<²ú\NqŠ:°Æš"˜rÕÅYÑNaêvu¸ãê.ÕÕÕÊ{E<<<¾¶³GHHŽŽÒaHÜý:;;¯ oÜHÐãjæ`Ç@áë=|||†ì9¯Bˆ+®U\®¸:Tq»ºlܨë…*ú†+ú^wssÃÂÂWWW%tÑ7¤áì쬄0ú†:•.B¨¥³³S vÔÔÔP\\¬„;ªªª¨¯¯G«ÕÒÒÒB{{;º«ç5 ÀÒÒ\\\pww¿&äauøúú*û666wàÕ !n§ÂÂB¾üòKeœ:u ½^ÏèÑ£ûDf̘!‹_ˆ;®§§‡_ýêW¼öÚk¬Y³†·Þz 
kkë;ZƒB„b$koÿúÀHßÑØh´¶ÆÒrà Èleúˆ§Õj•pÈÕ‘²²2”û:88\j $ @ ŽÈê"&Z­–òòrJJJ” HYY¥¥¥”——S^^Þou ___üýýñ÷÷' ___‚‚‚Ðh4øûûãçç''Lâ® EKÑU—œË—&š°Æš@ àM´LØBÜUÚÚÚÈÌÌ$==ôôtNž<‰……qqqJ@dîܹ7µzIMM k×®eýúõDGGóë_ÿšäääA|ßÎÆyî¹çðòòbýúõÌ›7O9ÖØØHxx8MMM,]º”´´´Û>Ù§µµ•Õ«W³ÿþ~«9›YZZRVV6$;BìÝ»—%K–|ãýþû¿ÿ›_ýêWw ¢á§©©‰íÛ·“ššÊ¾}ûèîîVB ?ü0ÞÞÞj—(„¸òÉïþ8ÉI:é0ü1•©Ø!Q…Izzz¨­­¥ººZY¹¶¢¢¢ßÖôè;ÉÉÅÅ…€€å3¹   üüü”ÏíüüüðññQñ• qûÆ~††•mßý††eô]pÊÌÑёѣGãááÁèÑ£ñôôTöÍÛ¾ûæ­;„bð™æ@†9¨aï8¥K†¹›Fß Gß.WwÛ¨Ëƺ:\ñM¡ŠÛÕeCˆáÌÎÕjµ\¼x‘óçÏS]]M]]UUUÔÕÕ)ásÀêëXXX`gg‡££#NNN¸ººâîîNPPaaaâééÙ/¤ëëë‹……ÅzÅBˆ¡¢««‹S§Nõ ‰1jÔ(ÆŽÛ/ {Ç'ç‹‘£¥¥…Ç{ŒÝ»wóÖ[o±fÍUê@ˆBˆ›ÓÛk …˜Ã!7³íì¼öùo,8âæví0ɈÐÑÑ¡„ÊË˯é†QZZJ[[›r77·»aøùù)_œÞÊŠ×w£êêjåßÒüïZUU¥l+**èìó{kooO@@€²š†F£Q¾„öóóSŽÉjb¸(,b¾\⽘N4hˆ!æš°H$‘8!¡4!ÄðV[[Ë¡C‡HOOçÈ‘#äææbooÏäÉ“™;w. ÜsÏ=7ô¡éùóçyå•WضmÓ¦MãÕW_½¡ð€ª««ùáȶmÛxúé§ùãÿˆ““Ï>û,ï¿ÿ>ƒKKK^|ñEþøÇ?Þ¶Ÿ[QQÁ’%KÈÏÏ¿îªÖÖÖ¼öÚk¼üòË·íçÞ.o¿ý6/¼ð€A³W_}•_þò—w°ª¡O«Õ²cÇRSSÙ»w/£FbÞ¼y$%%ñÈ#àåå¥v‰Bˆ>zéåÿ³÷æArÕç½÷§÷}›½G+I€d@`°lðŒ°]ØŽ‚!vR8ñƵ)‡r®q½I¹^'\Ç×÷½àÛ 1Ž+eâ$6‹`‹AHlB ¡uÖžé½OïýþÑýûÍéžžži¤‘FÏçT×Y{útk4çô9Ïçùîg?/7†—x‰—y™×x%¸€ ¸°1\ÄEœÍÙ’:(K˜B¡ ¥Ž±±1>ÌØØØ4Éctt”J¥¢ŸçõzY¶lýýý 200 =”ü±bÅ ix#œÒ˜ çókúÿ¢p»Ý:Z“<çÕÐ@átäXd •|¡’0TB†JÌP *YC¥f¨Žù R3”4¡dŠ™¤ŠÖ”Ö´¹¤lÂéL¥Rarr’¡¡!Ýlrxx˜ÑÑQÆÇÇurG"‘ “É`Åb‘N¥§V«U T@@§tôõõ100ÀŠ+tcÊH$ÂòåË¥–C„cbll¬IÙ¾};©T ÇÃE]Ä%—\ÂæÍ›¹ä’KXµjÕbï®°xüñǹþúëÉçóÜ{ï½\z饋¶/"„‚ 'èË!3 #3É$ñ8T«ÓžÇÓ^i}Ì$”ˆù»dH$Zi”Hbî4áõzY±b…¾È000Ð$6,[¶Œh4ŠK¤#}ÑçðáÃZ æðáà éî†UÓÿÑþþ~ýÙªÜýýý,[¶Œ¾¾>}Ã[nÌ §I’ìe/oò&{ƒšb –³œ³8‹38ƒU¬b5«YÅ*V²’å,Ç{A8µæÉ'Ÿä‘GáÁäСCøý~6oÞ¬D.¼ðB,ËŒ?c×®]ÜrË-Ü{ï½lÞ¼™[n¹…÷¿ÿý'ð]Ì{/~ñ‹øý~n¾ùfn¼ñƦó€þð‡|á _X×Û°a»wïžu»U«VñÖ[ouüœƒ›o¾™Ûo¿½)yNa±X¸í¶Û¸é¦›aÏŽ>ø ^xá¼»rONNrß}÷i ÄjµrÅWpÍ5×ð‡ø‡r£SN&™ÔÂÇ®Æð2/“& ÀjV³±1œÏùlbkY‹éB)KUÈ><<ÌÐÐÐŒãÖkaæ‚õ™ÆªX]NvŠÅ¢–4‰Äœ…ŽD"A:žöóìv;‘H„p8<«Ð¡¶S @`>A„Ã|$Œt:M¹\>fC¥sÌ%_¨„ •|¡’0T2†*øViJÜP†.”€¡„ €H$ E %r‚pt¤ÓiöïßßÔRI$ Ø¡þf”Ëåi×ÁÍØív¿ßO0$ÓÓÓC__ŸN3\½z5Ñh”îînº»»%M„E§Z­òꫯ²}ûv¶mÛÆ3Ï<ÃË/¿L¥R¡¯¯‹/¾˜K.¹D»»»{—…S„B¡À׿þun½õV>üásçwÒ××·¨û$Bˆ 
‚pjJA"qtÆE´iø|sJÌ`B¡úCŠÙO)b±˜ÌbÑ#GfÌ)¢Ñ¨îÔFµèÐ××wÚ'b”J%FGGõg«d‘#GŽ0<<¬/2Åb±¦ç…ÃaýYšes—ÄùÂ%œôäÈM“Dö³Ÿ!O]H³acÁi¢È*ÓàÁ³ÈïF¡3ûöíã‘Gá‘GáÑGerr’ÞÞ^Þ÷¾÷qÅWp饗²aƶÏݶmßüæ7¹ï¾û¸ôÒKùÚ׾Ƈ?üáY%‡_þò—tuuñ¾÷½ï8¼£é¨´|jµ:-¹Ãf³ñðÃóðÇüZ>ú(7Üpo½õVÛn¸f~ûÛß.Èk.$W_}5ÿùŸÿ9­ œÅbáÎ;ïä3ŸùÌ"íÙÂc_üâù×ýWn¼ñF¾÷½ïÍúœ‰‰ î¿ÿ~î¹çzè!l6›–@>ö±IçIAXDJ”x×ÙÍn^ážã9v³['† ³¡1œË¹l`p=ô,ö® ‚p†1«ä144¤‹2‘H¤£ÜFY¹r¥­ '%G›Ô122Ҷ˳Ûíž“ÐÑú¢^ANZÌRÆñ–1ZS9æÃbÈ*uC„ÅÁ0 FGG9xð ¾ç>22ÂØØLNN’L&I§Óär¹&±c&¬V+v»§Ó‰ÇãÁçó  …Btuui¹CÕE¬X±‚³Ï>[¾ï‚°äÈf³<ÿüóìØ±ƒíÛ·³}ûvÞzë-Î<óÌ&Iä /Äëõ.ò '?þ8_úÒ—8pàßùÎwNšû¡"„‚ §æt’v|~æmÆÆ`¦â,·»ž@Òúðxf^g^ÂIÖå÷t¦\.k¡A¥`(iD]h9|ø0©Tªéy½½½ô÷÷Fh’E–-[¦×©‹«§+Åb‘±±1Ž9Âè訖EFFF´4räÈÆÆÆ(‹úy.—K–ê"T¿Ž”5OŸîŸ±pr'Î>ö1Äà ³Ï4¼É›$˜*8‰a kˆeAÖ´ "‹øNAšQ]u~ÿûßóÈ#°uëV’É$Ñh”Ë.»Œ+®¸‚~ðƒ¬\¹²éyO<ñßüæ7y衇¸è¢‹øë¿þk>ö±µ-Êårô÷÷“ËåøÁ~Àç?ÿùòÞîºë.>ûÙ϶-„²Ùlø|>ž{î9Î:ë¬c~­r¹Ì]wÝÅ×¾ö5](ЊÃáàꫯæç?ÿù1¿ÞBÒšpb±X°X,Üu×]\ýõ‹¸g ËÞ½{ùøÇ?Ξ={(—Ëôöö222Òöw6‹ñÀpÏ=÷ð›ßü‡ÃÁûßÿ~®¹æ>þñË TA8ÁdÉòZcxµ1ìb{ÙK… nܜ˹¼·±‘œÇyld#ƒHA8ÙÉd2 366Æøø¸žÑ_Ú]gr»Ý LK´]¾|¹¾þ400@__Ÿ± ‹Š*0>±C¤AN”<¡¤ ³,¡ %f(CýmTÂ…-`*iCIJâPSÕëMRÆÑÊó‘0‚Á 6›MË*C„S%¥:tHר”ŽT*E:ÖI™L†\.G>Ÿ§T*Q©TÚžËAýú«ÍfÃápàv»ñz½‚Á ]]]twwëû郃ƒ¬X±‚Õ«W³lÙ2IëAè@,cûöíìØ±C‹"ãããØív6lØÀÅ_Ì;Þñ.¾øb6lØ âìiÊáǹù曹ûî»ùÀ>À?þã?²fÍšÅÞ-!‚ ‚0µZ=i$™œz¤RÍÓ*¤Ý:5݇c*u$©Í)$æi•P€ß_‡Ãõ±œhžPÔu£YÝ\V7›ÍëÌ]8Ünw[YD%‘ôööê 4~¿ßáâ‹Åš>ËÑÑQ†††ôç¬nö7]s¹\ZÐQŸi¿þœÍÓ½½½r#S8)ˆç8ÈAö75€Œ3®· Ö©"*eD%¬`ýôcE~¯AXÊå2;wîÔ "O<ñ…B5kÖpÅWpÅWpùå—ëô¯çž{Ž¿ÿû¿ç¿þë¿X¿~=_ýêWùä'?‰ËåÒ?óÇ?þ1ŸýìguŠÛg>óî¼óÎYSEŽ…T*Å™gžÉÄÄÄŒ7Þìv;«V­âÙgŸ]0!urr’o|ã|ÿûßÇjµNCCCCôôœ<Ýé™L˜ºy÷ÝwsõÕW/òž-¿þõ¯ùÔ§>¥oÈ*žxâ .»ì2 ~øà׿þ5¿ùÍop:\~ùå\sÍ5üÑýÑin/'‚a†y•WyרÓ^ã5r588‹³8‡sØÈFÞÖÎâ,lHA„ œ Ôj5ÆÇÇÓb‡šÖ×Ô´aMÏWEë­r‡9‰V¥{‰ —Ë‘L&õ#‘H豚6¯k•:òùü´Ÿ9“Їg•:¤£© íPé­²EkªLÉJÐh•-” ¡d sú†4Ôë©tù D %^(áÂ,Z´¦[(‰CIJÖP’LI3É*C„Ó%ä8p€#GŽèDuzrrRŸÏ)™Ã0 Êå2•J¥cJÔ“:l6N§³Iê0'uD"ººº´Ô±jÕ*Î<óL)@A8ìß¿_'ˆìرƒçŸžL&ƒ×ëå /lJ9óÌ3{w…ãH*•â»ßý.ßþö·éïïç;ßùýèG{·¦!Bˆ ‚ œ(Ì $IfÚ&ƒ6Ý‚5­‰$­)%íRKZ—uuÕç…¡V«é›ØfYddd¤©Káè訾®ðx<ôööFéííÕÓJdP7¸ÕºÓUl(—ËMêBœúŒÕ´úw0ñY­Výù©Ï³»»›žžzzzôºžž½\:1 ‹AG8¢SEZ“Fp€ SIV­)#æ´‘(QV²’Ò\„ãO.—ã©§žâÉ'Ÿä÷¿ÿ==ö•J…³Ï>['ˆ\yå• 
ñío›Ÿÿüçtuuñ¥/}‰n¸žž.¾øbžþy-„œ}öÙlÛ¶M߸_hn¼ñFþùŸÿ¹mR‡‡ÃÁ»ßýnz衽·gÏþò/ÿ’­[·bµZõ{·ÛíÜzë­|ùË_^°×:&&&´œ¢nbÞ{ï½'åУ¡\.sË-·pË-·`±Xš~ögÆyçÇ=÷ÜÃSO=…ÇãÑÈÕW_Ïç[Ľ„¥I™29È>öñ ¯°›Ýìc/ñcŒ"ÄYœÅÖp.ç² ¬a Ø€¹Þ!'šB¡ÀÄÄñxœááa†††t±»y^uÎm=ÿŠD"ZâœqzùòåR¨),(årYËñx¼­ØÑúPÛ©õí¾O¨¢åp8L(ÒvBG»eg> AŽd %hÌG¶P‚†’-ZÓ0Ú¥oÌ%[(Q¢U¶hMÄ€)ÙBIJ®PéJаÙlƒA`*=C ’’!Â\PUúF:&‹5%sLNNÇõùZ6›Õ3‹Å"¥RiV™ê×B‡N òù|ZèˆD"ú>³:‡[¶l+V¬Ðé"u‚ œšT*vïÞ­SDžyæ^~ùeJ¥ÝÝÝZ¹øâ‹Ù´iË–-[ì]Ž‘t:Íí·ßÎm·ÝF¥Ráæ›oæ+_ù î“´¶R„AA8•0Œö2É\–™çç#—ÌE$¹ä˜)‹Œ3::ÚÔq¦nˆæ‹QJlèëëÓ)f‰A]t2/;]o’OLLhi¤U '‹éG»®à¡Pˆ¾¾¾iŸ¯Zf^®.ô Âñ¦H‘Cbˆ!ra†9܆â‡a„2S7zé%J”¬`Ëd¬hGß_A–L&ömÛt‚ÈóÏ?ÍfãüóÏçŠ+®`Ó¦M<ûì³Üu×]†ÁUW]Ž÷ÞÛög¹Ýnþû¿ÿ›+¯¼rA÷ñàÁƒœqÆÔjµ9FØl6¾øÅ/rûí·/è~@=™âÆoäÈ‘#T*, ëÖ­cÏž= þZGÃŽ;¸ä’K°X,¸\.î¿ÿ~.¿üòÅÞ­allŒO|â<ùä“T*•¶Ûx<\.ùÈG¸æšk¸òÊ+›ÒmA8:jÔ8Âöš†×xÝìæ-Þ¢D V±Šõ¬çÎálÎÖÓýô/ö[„%ê–Û*t´›m’*Íéh4J?6›$øG‡ú=UIæù™æíZwêwXu©Ÿ-•£u;ù½„“•~S"†’)”Da.Tª…ú;a9”Œ¡Ä %_À”t1SúÆ|h—†Ñ*[(A£U¶0 ­²…4ZÓ7” a~=A„ãA.—ÓG2™dhhHß³œœdbb‚‰‰ -zd³Yý0‹3]ÃkÅn·k™C¥søý~áp˜®®®¦†‚Ñh”eË–Ñ××G(’ë‚ B†aðâ‹/ê‘;vðÆoP«ÕèëëcÓ¦M\pÁ\xá…lÚ´‰3Ï<ó´mø{*1<<Ì?ýÓ?qÇwP*•¸é¦›¸é¦›Nú0BAáte®rÉlÛLL@±8óë,„\âñÔÇ‚F‰!æ ó´f@[‰Au&i]ÖÓÓsZ~)1 cZËv,ÕcdddÚgív»› Ú>˜;ÄÈÍáx'Þ”.¢¦Õx/{I’ÔÛ»q7%Œ´& 2ÈJVbG: ‚ptŒñØciAdß¾}x<6oÞŒÇãáñǧP(tLê¸öÚkùÑ~´`i Åb‘;mÛ¶ñÔSOqäÈ\.åryÆ‹?üáù¾° û`¦P(ðÿðüÝßý†aP«Õxúé§Ù¼yó¼~Ž*:é„9±N„Ì„ÍfãàOþäOðù|lݺ•w½ë]óÚ§“•Çœ«¯¾zÆ®Îf{ì1Þóž÷œ =„¥C•*‡8Ä^öò&o6É{Ù‹Aýï•kYË:Öq6gsç°¾1xñ.ò»„¥C.—#‹éëLªQÆLJòù|ÓóÕµ$U0ÕßßOoo/ýýýMÓ’ %Ì™ù æG,›ñ<®“Ä1›äÑÝÝ-€‚pŒ(yBuco'f(yB%bt3̉*Y£“˜¡^w¾¨ïÈJ–0K*½B¥d˜å %]Ì$[À” Ñš¾¡ž£~® ÂÉD"‘ —Ë‘ÍfI¥RŒ111Áää$£££LLLè5%zd³Ù¦Ô£b±H¹\žsZ‘ÍfÃn·ãt:q¹\ø|>¼^/>Ÿ¿ß¯eu_»¯¯›ÒÙAáx“L&yá…xñÅyþùçyñÅyõÕW)—ËÎ?ÿ|6mÚ¤6lD¼“„§žzŠï}ï{üò—¿$ qà 7pÓM7ÑÕյػ6'DAáØÉd ®S)H&§æÓéú|*Õ¼]<>5­ž—HÀL§&N'øýC0XŸêãH|¾º8¢Öy½õq0X_îóÕŸëñœ–‚I»îä†v]ðÔ Ñv"C»å§cÇ»|>ßT8aNi-®PËZoPû|¾&1Ç,í¨$s:IWW—D F‚G8Ò”.2ÄG8Â!1Ì0cŒéííØé£hcè£e,k«u<‹øÎA8xë­·øíoËoû[}ôQFGGçô¼î¾ûnÞûÞ÷.ø>MLLðì³Ïòì³Ï²cǶmÛ¦÷ËívS,©V«Øl6¶nÝÚ6!#›Í’ÏçI&“ºøÄ\À–L&) d2Òé4…BT*ÕT¨266Æë¯¿Îðð0}}}¬X±BÉÀTáŒ*˜Y,T÷Qh–JT‹*~©¢UH£:˜†Ãa}S7 âr¹ø|>Ün7¡P¨©Xo¡¨ÕjÜ~ûíüÕ_ýÀ¬]N'7Þx#·Þzë‚íƒ ,5†b7»Ù×2ìaYê«”„|.ç² ¬1 «Y•Ó¯1  µZ­íµˆÖkª±H,#—Ë5ý 
§Ó©“O£Ñ¨îŽF›Ä•T+7¬…*ÌV…ê‘J¥ˆÇãÓ–%“Iâñ8‰DB?O·Ã\È …¦Í«eêѺN ª…Ó•N©ê{g§T‹vbFkšF'1ÃüºóA%bÀT†úžiþv3T‚†úÞi2”ˆ¡Ò5Ì?[½® ©L6›Õ)êúd:fdd„X,¦ÏÁÆÇÇÉd2úe:Æ0 …†ahc®IV«›Í†ÓéÔƒ=>Ÿ@ @$! ‰Dèêꢧ§‡ÁÁA}ÿ3 õßjAA8UÉçóìÚµ‹^x矞^x]»va‡ƒ³Ï>›7ò¶·½7²qãFV¯^-ßGNäg?û?ýéOyùå—yûÛßÎ7Þȵ×^‹Ûí^ìÝ›"„‚ ‚pr‘ÍN‰"J$1Ë%‰D}ܺ,›­§–$“õå†Q_× §.Ž„BG'”Ìôü%@©Tj*P­É#Jp˜˜˜˜V|h·Û§¥ŒôööNK%éêꢫ«K‹$§›DbîdØNØi]6666íbk§î…rU˜7 i ‡9Ì(£ 1ÄHcf˜QFg¼éyA‚ 2¨e‘~ú‰e 1D‰ÒO?}ô-Ò;ádâ®»îâsŸûÜ4AµŸûÜç¸í¶ÛtáÆBQ,›Î8ÀÎ;Ù³g{÷îåðáÃäóy|>›7o&‘H袶D"A¹\îøóg’ÔMRµÍf#‘H‰D‡ÃZŠ€©sÁŒs!ÌL¨âhN i‡aúF°¹K²*‚©Î­æâUSHªðÈ,˨Ÿ£ ‰:a.öSÓ3-S%تߕT*Åõ×_ϯ~õ«yýÎ 044$ä…Ó– ö³Ÿ·xKßl oñEêÒZ„gÍ0ȹŸ t&ŸÏ3999kb©ZÖ®ñ„ùšÁ\LåZÁéG¡P˜Uܘ‹ä¡ºü·#‰tQ_ è(o´;§„S‰N©æïHsIµPbF»4Nb†zÝùÒšjaþžÙIžh3Ú¥it3Z_Wát$—Ëé2¹\N_S÷iS©ZªUòF&“!ŸÏëç—J%J¥’n&3_l6.—K§©k—J΃ºÁJTçnê{EWW—ü=A„”ËeöìÙÃK/½Ä®]»Øµk¯¼ò û÷ïêß‘6lØÐ$‰œF~oa IDATwÞytww/îŽ/<Èý÷ßÏ/~ñ žxâ "‘ŸøÄ'¸þúëyÇ;ޱػwÔˆ"‚ ÂÒÆ0 Ÿ¯ãñùO·[—LB§B1·».‡¸ÝSâÈ|¦Û­ á$%Ú)t*T8räHÛŽG+7ôõõIår¹©«§ºð«>{óØüï¡n@š±ÛíZÆ™i<Ó:Õñ[:'ÎC 3Ü4Ž×Ó‡9LŠæ."D‰2È`Ó8BDO¯`A–†„'ÂtÞþö·ó /Ì«8ßf³FùÉO~Ò6©CaÃÃà 7‰¯JrUËÌ7Z[ñz½M¢ê@ºbÅŠ¶…m‡`0¨‹f"‘ˆ~ŽÐUeNZQÅR­¥Í"N»e­¸Ýnº»»§X,b±X°Z­Ôj5-yÔjµŽ¿‡Ï<ó —\rÉq{ÿ‚°˜äÈñVch?ö³Ÿ8õBD ¢D9£1´JÝÈ *APtJpiY+‘HdV©C-[¾|¹|_¨s"s³õh·¼Ó¶3ÑzΜÔÖi™yùé˜",,R-” 1—T ³˜Ñš¦ÑIÌ8ÚÔÆv©J8ÚT •¦ÑIÌh÷º‚ Âì¨s*óù•šmºW—Ífµd›ÏçÉf³ºÑJ.—ÓÉ…B£)á³Z­8Nìv»þ›ï÷û›x½^/Á`þþ~~¿Ÿ•+WÒÓÓÓt.§Ò{¥ù‰ ‚ ,©TŠ—_~™—_~™]»véñÄĽ½½¬_¿žõë׳nÝ:Ö®]Ëúõë9묳ä:à  žyæ|ðAî¿ÿ~víÚ…ßï窫®âSŸúüà—D ²!‚ ‚ GC<¹ÜT*I6[ŸWé%†1•rbõu‰ÄÔsÔt.WOAÉd ¥ƒã4æšPÒ)áÄåªOàtÖŸ»ˆd2-4˜­bI;ѤÝi¬Ybh•:- .‘d—ÙhwÓ¿ÓC£´+ 8ZqGŠ„v¥5e”*SE¹nÜM’H¤1´HúèÃÎÒ—Æa)ðÆo°nÝ:ìv;µZmÖ„3êfå>ð¶lÙÂøø8CCCú˜¦Š,Íz{{u’™J1ëé顯¯¯iYww7ápxI\<Q‰J¾Sx€X,¦»(ª®Š­ç@6›MT9NÜn7üÇ̦M›X¶lÑh”eË–6ç–©O‰‡8Ä>öés®}¦a?ûõ¹W„kƒvÕüzÖãgaÓ™ád'—ËéF zlž6¯WÒiëyM0ÔçæG»eýýýrŒY"˜¯ÏÌWÜPËÛ¥Á(Tñø|ÅÖeªÛ¿ ÌF§T ³ 1—T %f´KÓè$f¨×/­©J˜kª…3Ú¥iÌ$f´{]AaaQ‚_*•"ŸÏë$T*Åää$cccd³Y}­È0 ‰¹\Ž|>O:¦P(P(0 ƒr¹L¹\>êÄ ¨_WR2žJÞp»Ý<>ŸP(Ô”¤ÖÝÝMWW===tuuáóùðz½:­Cî· ‚ ÂéÃðð0/¿ü2¯¾ú*¯½ö¯¿þ:o¼ñ¤V«a³ÙX½zµDÖ¯_ÏÚµkY½z5+V¬Àår-ö[8a$ ¶oßÎO<Áã?ÎöíÛÉçóœuÖY\uÕU\uÕU¼ç=ïYrŸ‰!‚ ‚ ' ™db09 mR8¦ÑšTÒ.½d®ËZׇÃp:ÉD299Ù¶Ë·ÝnŸ—Hbîø­n .e Ãh›@2—që× 
«Õª?OsõÖÏTM·ÎK1ÂéM‘"cŒq„#Œš†qƉc„Æ#FŒqÆ©0UleÅJ/½ôÐC/½ 0@ocè£~úõü’>"‹H¡Pà_þå_Èd2|ë[ßÒ…Aó¥»»›åË—³|ùrX¶l™G£QéééYrû„…£V«‹ÅåÈ‘#ŒŒŒèñáÇõxtt´©  ²jÕ*V¯^ÍêÕ«õôªU«Xµj½½½‹ø®„Ó sZ›’<Ìó8 Ï—ܸ›$ópgB:F K“B¡ÐVà˜Mðh×8!ëB­®®.]¼¥ ¸Ú‰ÒµïÔ ]è£WÅîí0wfž¸a^&¿WKUà S‚E»eê÷ïx,Sò…ùuçC»T uÍo.©JÌh—¦ÑIÌh÷º‚ ÂâÐz~eN×H§ÓLLLèdX,†a¤Ói½m2™¤X,ji£T*‘Ïç©T*T*•y¥›±Z­Øl6\.—NÜðx<ú¸äóùt°’2‚Á ‘H„îînz{{ñûýøý~B¡^¯W' ‚ ‚  Ãàõ×_oz(aD}§·X,D£QV­ZÅÊ•+›Æ«W¯fåÊ•§d#šR©Ä[o½Åž={عs'/¾ø"/¾ø"ûöí`íÚµ\vÙe¼ç=ïáÝï~7gžyæ"ïññE„AA„¥N;Q$™„b±žf’ËÕ¥‘x¼¾,›­'–‹õ$õœTª¾,•šzN"³NZ,Sé$^os:‰GB¡ú²@ ž`âtÖŧ³>ï÷ן M‰&Á`}YãÆÞ|˜oR†zŒŒŒ´í4g¾ñ>ÓMûNë{{{—l'¸N²H"‘hz$“I’ɤžžé†òlÒˆšD"m·ñx<'øSƒ5ÆC«,2Æ#Œhqd”Q$šžïÂ5£,¢¦{è¡>z葮؂°@LLLðú믳gÏ^yån½õÖ¶Û9=== °jÕ*Ö®]˪U«8ï¼óذaÃ’=¶ ''cccŒŽŽrèÐ!>ÌØ¿¿ëb¯×«e‘3Î8ƒ³Ï>›uëÖ±nÝ:V®\‰Õj]äw#œ à‡ÃAN›Ï3u>%Ê Ó°šÕz¼šÕtѵˆïFõ]_%ƒí÷üÙ1‰F£§Í÷úS\.G6›%N“L&Éf³º˜0•Jéùd2I:Öó‰D‚L&£çãñ¸N8˜ ÕÉYª"AÕÙY͇Ãaü~?>Ÿ@  ¯S¨BBõN.jµ‰DýZ*>]èeÐYêh·ìhP)æ„ %PË2•¦ÑIÌh—¦!‚ œœ¨Ä'•ङԽ›d2Éää$¹\ŽññqŠÅ"ñx\'®¦ÓiJ¥’çóy …år™B¡ ecÁjµjQCI‚.— §ÓI Àétâ÷û x½Þ¦s®¾¾>ÝÝÝ„B!\.—–7\.—>– ‚ ‚ ,b±û÷ïçàÁƒÌðð0{÷îeïÞ½ìß¿Ÿr¹ŒÅbáŒ3Î`Ó¦M\pÁ\pÁ\tÑED£Ñãº'"„‚ ‚ ÇF©TH²Ù)I¤P¨K#ét}:•ªK%ù|³Œ’ÍÖ§Í2J:]ŸN&§d”ÙðzërH8\—HüþúÃél–Q‚Áú²`°YFq¹êÛ›eõõ³A_ÜVÂB*•"7] O¥RMÓ­²C©TjûÌâB0lšnMÐ0¯WóK59c.òNkÇMõˆÅb3~Þ³øt{¤Ðg颺hlj3̰žnbhš@!B”(‘–aÁ— ÂéH±XœÖ©fÏž=¼þúëú£Çãaݺuôõõ±|ùrÖ¯_Ϲ瞫SNÅ.5ÂéM±XÔØÕcÿþýìÝ»—×_X,ÔÏQ”¢JYªç{Âträ8Ä!†æ0‡9†j>Æ×Û{ñjÁ£UøPƒ ICN ‰„þN×iZ%vÄb±Ó@½^oSJGww·~´KñPóò}ïÄbî}4ɿ鉉‰Ž‡JèÔØ£Ó¼yZ’8UpZ­VuBà|—™…‹cYv´©€$`J~8–ef ãX– ‚ §63¥i˜ïCår9&''ÉçóŒcÙlVoŸËå(•J†A©TÒ¢ÆB¤j(l6›>Ùl6Øäv»õy˜JkRB¬Y¤íééѲF(Ò’‡ÏçÃårIʆ ‚ “Íf›$‘‘‘b±cccŒ‹ÅˆÅbŒO;W …BØívB¡NâTç|~¿‡Ãѱ1„º¶£®ÉÔjµ¦³êºÂãñ088Èàà kÖ¬aíÚµ¬]»–³Î:‹µk×J³DAAN”Lb›O溬ÝúÉɺ´2Üîº,¢d’ÙÆm¶5l6’•JýQ.“²Xˆçó$T¡@²ÑŲT’J¥f,nè$Œ(©$ êŽGj;5ï÷û—\¡¡YÖ1§˜§gJ(I&“är¹¶?×ü›o\´v÷TÛ¨ŽTæeª[•pj’%ËcŒ3ÎĈ1ÑT ‰y~‚ J4 J––J¥¶‰õóE%j¨‡ËåÂn·ãõz±Ûíø|>ª¡„ •´®Î¥ºººzl·Û‰D"ú|Í|LAA–µZMË!J‰Çã”Ëe‰¥R‰L&£Ï—Õù®=Úa±Xô55V×}Í [ûûû‰F£tuIÊùlˆ"‚ ‚ æD•N¢¤‘L¦ž‚’LB¥RO@)—§ÒOr¹ú£P¨/+—§¶I¥¦’OÔÏ› J Q‰%Á ØluÁÄfÃðùHI»”ÕJ²V#uÁH $«Õº`R,’4 ¹‰L†T6K&—Ãè°/fADIª˜b&‰¤ÝsT€Sõ%U "ªyY:ÖE-©TJǘ—̓®:\µÊ%ês4/ úóo·L N~’$g\ "f‰dŒ±iRÉThþÝñáÓ¢H=tu"Dô´ér,_jµ¯½öÏ<ó 
;wî䥗^bçÎ:õ`ÅŠœwÞyœwÞyœþùœ{]»ö”?NÂñ¦Z­rðàAöìÙÃÎ;Ù¹s'»víbÏž=”ËeÜn76làüóÏç¼óÎãâ‹/æÂ /”ÿ[‹@ŠC 1Ê(‡9Ì#âCá‡a„"Srw˜0ËXÆr–%ÊJV%Ú´¬Ÿ~,Xñ §#’f{ŒŽŽ¶í,<[JãLIhœª°_uƒnÎd2äóy=m†þþÚnÚ,tBu ƒx<|>_Ót(ÂívÏIÚ…Bø|¾%s,3§M˜;z/Æ2u3þh0Ë椕ÅX¦:‹ ‚ §/Žwccc ÆÇÇ) LNNR(H$‹E’É$Åb‘t:M©T"›Íêä •˜¡ÄŒJ¥B¹\¦Z­R.—DÐP)6› »ÝŽÓéÔc§Ó‰ÛíÆétâñxp¹\ú¼¨»»»)µ,‰hI# ât: …BØl6Âá°>?AAáô@„AAA8‘­`R©ÔŸS*Õ†J8ÉfëÒI*Uß&¯;aq›¼ßa·w¹ˆÛíÄ­VòN§^/—ÉÛíÕ*ñr™x±H¾ZŨTˆÙ,ÅryÆ×q»\DÂa<æâus.ó§r!ê–n¤¿˜Å‘´)FuTí´l&TñŒ’DT§ÔÖeJ8iÝÎëõêe6›í~:B'&™$Ö̲ˆK&™$NœIÓ``Lû9M²H7Ý3Ê#æÁÍÒ(žd2É3Ï<ömÛô#ãñxšŠÓ•²Ô¨a±) ¼òÊ+ìÚµK XJÂr:lÚ´‰Í›7³yófÞõ®w±råʾ9È<ÀgøÌ)-&Ɖ3Äqâ 3ÌCÓÆG8B’æó´Ö°†(QÔcµl9Ë Z¤w%,e*•JSb*•jJ 4'(Æãq½LmÇ)´IÑt»Ý:ÜÔ1—é`0¸ŸÄÉmÌÒͱNw꺧0Îgz¦u‹™Æ¢Þ?ÔLT¡f"QOZR]»&B¥Ñ¨î…0•nèϱ\.kFuÿ†ú¹°* UëÕÏ7/›/ª#" ?ß…^èsó¹.A3J¼PÇQuì­Õj Q­VuZ†º¶],õ6Ôj5R©årY'm¨éJ¥‚aÔj5-{‹E}l^(ìv;‹‡ÃÍfÃétb³Ùp»ÝØív<‡ŸÏ‡Ãá àp8t"F(ÂårÑÕÕ…Ë墧§—ËE__Ÿ~žÓéœ6AAA8ˆ"‚ ‚ K•LÒnϼn>Ûäóñ8y’IãÑqÞf«‹'V+†ÅB¼V#^­’¯ÕH”Ëtú’â¶Û‰x½xœÎºlâ÷itu7:cy|>Ü>‘în<Á îH„HWWÛâ•îîîSª³d»ÂŸNÝ{gZ¦ Xfb>@³­[̡ӣIQƒG&˜h»>CfÚÏòâ1}¤›n"!LXÕ´ ‹–£££<òÈ#üîw¿cÛ¶m¼úê«T«UÎ8ã ÞùÎwêÂó .¸à”•÷a)ðæ›oòôÓOóÌ3ÏðôÓO³sçNÊå2Ñh”Í›7óîw¿›-[¶°qãÆã¶Oñ·qÿÉR¥Ê ¼À\pÜ^ïhÈ‘c”QFaœq†b„†Ã# 1Äc”˜ê¤îÁC´1 0À ƒôÓÏ2–ÑGËYN}’ê!5dŽV¡C¥¶ ª¸½Ç£ÓU¡’5Ìó3IªCÿé€*èW€¹‹´Z¦:Lg2-Äãq]i^V,Éf³d2] 9í¾_y<ÂápÓ´×ëÕÿ¶æiÕIZM{<ž£sŽF²PïÚKf £d¡RN 9]C¥oV«ÕŽÍæ‚JÙ´Ùlús1'Q¨F V«•P¨.ï¹\.¼^/@@‘*áB¥VÍ2³„!‚ 3a>nªã®ad³Y’É$Ùl–\.Çä䤖2*•Š>GQëK¥étZKµZl6KµZ¥P(P*•¨T*ú®Æ •¡0'e¨iuŒôù|@=ÍÜb±à÷û±Ûíø|>\.G'”¼^¯>ïQ "‘ˆ>–[­V}|—†H‚ ‚ ‚ ,EDAAáØ©ÕêI&sC]6i3®LN’*•HÆbd*2‰™J…x*E¶Z%ŸË‘¬VÉõ„¡.›¤£±,Õ˜Îβûn«ÍFÄéÄãtâ±Û {½xŽz¡Çƒ×çã j¼^<4 _#ý$ØÛ‹?ÄÓÝM &00€ý$.šRE6*…$—Ëéä’\.G6›%“ËåÚ®K$ú&¢ZשC›*ž‰D"x½^¼^/Á`Pß°óù|×…Ãa|>_Ó:»Ý~?±¥O‘â4yd¶!ÞÚ 0£,Ònlžöã?ÁïþÔ GŽÛ¹òQÎåÜãúZ†aðä“OòðóuëV^zé%ïxÇ;xç;ß©%㺂 ¹\ŽgŸ}–§Ÿ~š§Ÿ~š'Ÿ|’‰‰ Ù²e [¶láŠ+® ¿¿ÿ˜^§D‰{¹—ÿÃÿáE^ĉ“"õ¢¡­le [âítÄÀhJñ˜mÚŒwSŠG”("Ó–E‰Šè!ÌH:nJ÷‹Çãd2™¶ ídUÄ׎™dŽÖeæùp8L8ÖóK¥±”” :N'“ɶ˔ ŸJ¥(‹¤R)-´[6›8¯ƒ8N‚ª9A㻢êm^æt:õw—Ë5Mê°Z­X,Ün7µZ ·ÛÝ$B´›>‘ë%X˜E‹vÓǺ~>ÛªAA˜NÇÌD"A:¦P(J¥( ú\P‰Åb‘R©¤¯­†¡E‹|>O>Ÿ§P(P©T¨T*”J%ý¨T*Ôj5-B.t2€ÕjÕç V«»ÝŽÝnÇápàp8p¹\Øl6=v»Ýú¡®ßªkµ~¿—Ë…ßï×ç=ê|4´=N‡Ãa,ùn%‚ ‚ ‚°Pˆ"‚ ‚ 
©ÍE”äð0F±Hnb‚D6‹‘H`‹Ä''1J%ò™ ñB#—Ã(—Iärä*Œb‘d¥B¶T¨ÕHU*MJ'€ßb!h±à¶ZñÛløív ùÄi·ãs:ñ¹Ý8m6"«(„ÏéÄéóöùpz½ø¼>®`p €#!àõB8  øýàp€Çnw}Úb‹êU'ØD"¡Å‘v"‰ºªd”l6;ã:Õm¶N§SË"^¯·ÛM8Æåráóùô Ç@  »Ç…Ãa}ó1 áv»õ¶.—‹`0(EBGA‚qâ$ƒšn·[VW»šqà˜³H"D°eZ„Oáøó{~Ïe\À¥\ÊÜÈñG8Y˜Ï¡¡!î¹çxàžxâ ÃàœsÎaË–-\y啼÷½ïÅ‚ÿ®‚°°T*žþy¶nÝÊÃ?ÌÓO?M©TâüóÏçø×\s ]tÑœ^’$ÿÊ¿ò¿ùߌ2Š+*z½ ?ãg\ÇuóÞWƒ1ÆC¬1Œ1¦“=Ì) M¯ÛÛúè#J´ã´éÊ~:s,©{jY,ëxÞjNÕk÷hMÞ3?N•´=•Ú ÒÔç£ "UºC<×rºúþ0SšF©Tj»l6|>N§“H$‚£!ùû|>l6~¿‡ÃÓéÄétjÃáp4CÚl6jµ‡‹Å¢‹0Ëå2v»ý˜%‹¹¾—™8™$ ó2÷A„¹¢Ž…æä§\.Çøø8“““är9}¡’¹”Ha>¾–J%=]­VÉår”Ëe*•ŠNµ*‹:é"ŸÏS«Õ¨V«MÇøãQ:c±X°ÙlMâ…ÅbÁétbµZõy‰ÕjÅívc·Ûq:¸\.ìv»>ÖªT u=TÉ¥ækžjyww7V«U'Q©óó1[AAAXZˆ"‚ ‚ ‚p4¤RP©ÆÈfÉŒŽ’J§1&'ë"D,†‘Ï“M&If³™ ¹|žT&C©\&™Í’/•0ŠEÒ…ÅJ…d±H¡Z%W.“®V)Ïáëšp!À àn,sX,Îú2·› Í†Óå"ètâv:ñ¸Ý].>!§w8ŒÇf#ÐÕ…Óé$ÔÕN'¸\àõ‚Ý@ýÅ#‘ú8›­¾ÞåšÚÖf«¯;TAY;Y$—Ë‘ÉdH&“ºÓžê ¬º3«N}¹\NwðS7ˆg#‰4É#ªÓßﯖ&y$‰è”`0ˆÛíÖÛº\.B¡¾+ðšÉ“?&¡¤Fûÿ'aÂM‚ˆyڼμ¾u‹“K ú ¿áC|+VjÔânàó|ž38cÞ?3‹ñÿñÜ}÷Ý<þøã>ô¡qå•W²eË–/_¾ÐoC„“ˆL&Ãc=ÆÖ­[¹ï¾ûØ·ok×®åÚk¯åºë®cÆ mŸ÷oð=¾ÇÜI¹1´ÃƒÛ¸¹‘ ÆgŒ1ÆׂÇ(£mç³-o.\ôÐC/½³ ½ôbG ’—*­²±9•C%n˜Ó:Tú†y;•ÎQ©TÚ¾†ÓéÄï÷ë„@ €ßï'èNÇêá÷û‰D"Mó@@/[Ìâø\.§;V—Ëe‰•JeNâÆlÛæóyÒé´Þv®x<¬V«.Ft¹\8=m±X°X,ØívªÕªî^]*•ô9t±XÔ’†’* ÃÐ…œª¨ôh ƒØl6l6ÁÆ÷™vÒƒÅbÑ…–ê»L]Bý;LÉì0%¬ÀTwl‡Ã¡Å[³  …°Z­Mû"‚ 0 ˆêºX6›¥\.ëó¡J¥B&“ÑÇÏt:M©Tj’.*• †ah¹Â0 …‚N¾(•J:áœtQ(ÔEm•t¡¦kµš–0Ž6› «Õ  …N›Í¦¹J…ú±Öf³a·Ûõñ7àp8°Ùltuuée»ÝNOO‹…`0¨ÏúúúôÏPÏUÇkóù€ ‚ ‚ ‚ oDAAA8I©Õj$ …‚–ŠÅ"‰‰ ŠÉd½Kîø8¥r™øèh½snã&o±P ®Š·²Y²…ÅBD>O±T"S,’+•(ÌPgÆm±à±XöZ`­† ÖÆØi¤¢P—T¼ÔÅ¢&‹¿×‹Ãn'èñ`s¹»ÝX¼^"N'ÖPˆÃ1=õÄœv ÕZOAñxêÛ5аðùêòŠYZQ²Š’TL$“Iòù<ÙlvVyd¦m³Ù,…B¡iÛD"1kGA³<âr¹ºð+ âp8…BºpL­ …BØíö¦uª Ì¼Î\¶Ô10ˆ'O^O·³­ë$–¸qiÂu×]Ç?øÁS¢ ¹ LJ;vp÷ÝwsÏ=÷pèÐ!6nÜÈu×]ÇŸÿùŸ388È“<É7ù&ñ)vüy.\¸qS¤8-Ê‹—zè§Ÿ^zéi }AÍ÷ÒK?ýÏ·.'Ì›Û%mÌwÝ|’8Ú¥n´.›iUÄ¿¨bK5V)ª+v¥Raxx€‰‰ à N“Ï盤Œl6K©TÒ?+—ËQ*•ÈårT*òù|“(1J6P"†Õj¥V«iù¢Z­êyUày,¨ÏšÅ%YX­VB¡zÚ[;ÉÂ,a«da·Û 4¾7˜÷KAÔñêÇÂÑÑQ}m\ƒƒz“µ<›Í6¥U¨ct±XÔR…:f—ËeªÕª)Ôv•J…r¹L±X¤Z­R­Võt­VÓò£yZíãñ,ûP瀖&-A¨T @'oY,-S8\..—K‹êz—ÓéÔÇi¿ß¯Ó1TÊ…ÏçÓ‚F»t*ó9€ ‚ ‚ ‚ œ®ˆ"‚ ‚ ‚pš“H$tQ™ê"œL&)‹¤Ói]È—L&©V«ÄãqjÕ*‰±±z!Ûäd½@­!Jär9Œ|ž|¡@.Ÿ§P*‘Éç)ÍA>°[,ìvœ€ÏjÕR‰ÇbÁ]©àœ• j;õ$+,tTì@  „‰&a‡‹ê¶Ô…³@¢ 
UŠ9ùDmïtÖ…€p˜r¥‚d+Šå2)‹¥þ¹Øl$²Yr¥i“ð“ÍfÉd2”J%âñ¸.ô¡qÝu×ñÑ~TS ‚ @½ ì÷¿ÿ=¿øÅ/ø÷ÿwùžÝRËSX°Ì(ѵâÄÉf6ó>3%xTzyñ‘¹ëŽ»x衇¸ï¾û¸òÊ+ó;fܲ–Édôù aZxÍf³är9Ä¡ÒÛâñ¸žN¥RM ‡Ãx<|>¡PHÄæõz‰D"zZ¥u¨s•Öá÷û;ʰæNÙ###äóyÆÇÇÉçóº;v<§Z­jÑW=2™ •JE§b¨nتs¶ê”­Š9U±g¥RÑ…œ3%‘,$ªØÒb±èBJ‹Å‚ËåÂãñè¢LU˜éñx´ a.ªlWh¹PÓÇC´ANNJ¥±XL3™ “““zýðð0år=enrrRo§q@=J¥>©Ô*•0¡ŽÏµZMK€>.ú˜\«Õtz…:6+BUÉÄñ(PÒ%L‰˜j¹YdPÇK5­ Ç£“&œN§ÞÎápt²V Ðr†ÏçÃívk¡Âï÷c±Xðù|¬V«>§r:Ün·¾6 ¢¤ ‚ ‚ ‚ œZˆ"‚ ‚ ‚ œPfê}Tã\Žx,F¾PÀhé)%“ËQj̷͆ÇnÇmµâ±ZëóPŸ¯Õ´œB¹\ON)—q×jx*Üår}ÛÆ63i] °Ù°Ï7õÄj¥ìóÕ ¬VrÔ‹òv;ÅZR©DÖ鬋=†AÖj­Ë'Õj]T)IT«ÄS)ŒB‰r¹^ü™Ï“(1 ƒÉÉÉ9w‚n-\ˆ±¹s·ylîð|2‘!CŠI’¤C‚DÓ¼y}»uYÚÏ:Šà,AÁå94¾´c§Fñ1>1ú ~÷ßñãÿ˜îîn>ÿùÏóùÏžh4ºÀŸÀ©Ç–-[xøá‡{7„ãˆü;…B_Üó þ&ÿ7~ûa,ç[ÀœB¬X¹–kù?cïÞ½üèG?â?ø.ìøô§?Íüããý6–í’4ZÏ:¥nÌ´m*•šUXh=>G"\.—.D´Ûí¸\.|>GËn·§ÓI­VÃáp`·Ûµ4Q©T´DSRŠJ½P¢±ê˜].—ÉçóÔj5ŠÅ"Åb‘ZãœGxªbO`A$Œvœ‹E[ª” §Ó‰ÕjÕ¯Õ2Ud©Š0€4\.—$Ô9×ëÕÒ†ùsŸiZŠ5AN=TB”Â0 FGGÈçóMD¡PÐÂf­VÓ²C.—ÓÉÅbQKŒf)B%FA]Î0 CÕ1U͋Ŧïþ*¡Â¼Ïæä)uÜš¤ 5¿Ð¨c±9ÍJÕ´ÃáÐ…ËåÒÒ…J¤€f™¦Ò§Ô´ºÎ ÄS¨sÍÉT===úçF"‡n¶¡ŽÍJbAAAA8Qˆ"‚ ‚ ‚ ,YtbI£ø±X,’ÍfuÊF­‘Òè”t:ÝTˆ¨Š&K¥™L†jµª‹3âñ8€.¤Ìf³ …úëÍQ¤P¸ív<«¿ÃÍb!h·C­FÈáÀZ­â²Zñ6Š."6‹x­V\Õ*¶r™ Õ …þZ G­†Ã0ð7~¾JR1‹)ªG³—zŠŠJVÑøýàpÔ ŸN2i-”T‰J8Qcµ^=OuÕ´Z­„B¡úçÔ(Ö ƒº¨äxQ¡BŠwüô¾qÛÿK5PÄJ|×Aa ”çëÂTaù¥ËùŸ×ýO¾ð…/,Já¨*àQx<V­ZÅe—]ÆW¿úUÖ­[wÂ÷ àýï?>ú輟g±XŽK¡Ó±¢>ç½o ñy¯Ïôhÿg¢õwÙívÓÝÝͦM›¸úê«ùä'?ÙÔå×Ì›o¾É·¿ým¶nÝÊðð0‡sÎ9‡Ë/¿œk¯½–óÎ;¨ÿûýÛ¿ýßÿþ÷yã7Èår¬[·Ž«®ºŠO~ò“lܸqÁÞÏ|yòÉ'ùæw¿Éo¿aõgWãø˜ƒ7\o`Æ e¦‹˜Ç6â¾ÊͳÏ>Ûög^uÕUüíßþ-]tÑñÞýã‚JñJ&“”Ëe’ɤ>gPçj›D"¡»[«sÃ0Èf³¤R)½JåPI¹\nÖýP…n·[D*1AýÞÚl6Ý[ýŸSµ•d¡¶UÒ†šVE ª{÷‰@u»†z¦ê¢mµZq8Z:Q‰ê½+ù" âp8ðù|8"‘‹¥)™ÄãñÐÕÕ…×륧§»ÝNoo/~¿–½As‚S*•"ŸÏëã^¥R!c†Þ>“É4‰¥•J…±±1=_«Õ´`¨H$ú˜èc¶"•JéóÐjµªe 5_(ô|¥R¡\.7Í«´ µ½$Ôþ˜ÇÇóyª%6›MÏ+ÁQ}·5 Ž€>—UÇY5ïv»›„HuÜU©Sê˜ìv»õ6Á`Pÿt8ôööê}êééÑrF(Òß¿½^/ýýýÇå3AAAAXªˆ"‚ ‚ ‚ lj£PÌݰ3 AB‰'j³”¢ŠLÍÛ«Ÿ=×"Òv<ìV+› £$ävc­Õê‰)6 ìp@¥‚×nÇU«a­V 9P*a«V V«P.ã°XðW*P*á´Xð•ËP,â*•ð6:ªÔ˜’T|€*¯ö–}-Ûí”J6ÅF±kÁb¡`µÖ?‹…\£Ø5[©`ÙJ…\£«x®ZÕ²‰OÌJë4O½C¸ÇCÅ餿rañzëÅ0~?åF·ôœÃA¸««þÙ…BX­VÝE] *J\Q’ŠR~ñ‹_ðÿ÷ÿR2_¶ùÿ€/›>Œ6XkVj–ús\/¹¨þ²Êgû>ËmŸ¹mÑSUTñq­V#“ɰwï^~õ«_ñÝï~—»ï¾›+¯¼rQ÷o>œ¬B,ξÌBÈñÀ¼¯†a022ÂöíÛùÑ~Äðð0¿üå/Y»vmÓszè!þ×ÿú_üÍßü 
ïz×»p: sß}÷ñõ¯d2©æ7¾ñ vìØÁ·¾õ-Ö­[G>Ÿç…^à+_ù /¾øâIñ9ýîw¿ã¦›nâõ×_çæïßÌòO/çË#<ÌÃĉc¯Ù©P©ÿ=z˜Åa±ÙlM…™óE_Uh<'NpøðajµétZ‹ãããT«U²„±L&C¥R!“ÉP,u'ír#AKU7nsÅl´ŠD õo¨Ò*jµZSѧú[ GŸ’a·ÛuÇk‡ÃÅbѶÍÉžÆ1QÇ””´¨‰Dtáh»ä/˜ž4¦ÄMA„SõÌŒúîgfrrR³ ÞÔ •J5 …BA¯}œ*—ËZ–Pß +•ŠÞNmS­Võ1Íüú*1Â|¬PÇ<ó2s’„B Õj•r¹ÜtŒ3‹‡æe‹u.£Ò%ÌÉO*]B¥M¨c«ÃáЄ•0aµZuJ–ú¹ª¹Ô…Š€Jø¤.B(ÙAÍw5¾'ªíÕ±RÍwuua³ÙôwFõóÝn·žw:'¤i ‚ ‚ ‚ ‚pò#Bˆ ‚ ‚ ‚ œ´J%•JEÅv’JTBÊÑü %ÄMrй ì±ôx°5ŠyBn7ª6ârA­VVTO­F¨Q`c¯V 4žç(•ð[­P­â,ñZ,ÔJ%<å2nÀR(à­ÕÚŠ*~Àј6‹*Aêi+>«•bcrV+%…Ëd•b­F²Q$•TÙ×=ÀEóþ!ð¦m`-AÕ4¬ß Ù'½¼qo‘?8çøÉO~B4úÿ·wïÁQÕ÷ÿÇ_»ÙÍæ¾ I¸È-D H[En¡†D~•2þ¨R™V­ŠVFF¡ØVn_mmm:Q±£T‘v€A„~X.‚Ø”R¹ ÃM¹“ »l’ýþçt7Ɇ„€àù˜ÙœœóÙÏùì²'Æ÷ë¼;µlao@ÿëׯ×K/½¤ýû÷aVצ-‡„ÜxMÍuÉ’%úío«½{÷š…gΜÑĉµmÛ6¿â@Ö-[”––fŽÙ¡C8p@íÛ·÷Û/77W)))mfjjj´páB½òÊ+š4i’&L˜ ì³µ.ʇ”Kã$¥J:.)åêãõíÛ×¼¶ÔÔÔø³z<óŽÛ¾Å¦õ m[Ã(5ŠB.¾ß÷í®q­A ƒ¶0ŠH;u•$EGG›¡¾„„sߘ˜s߈ˆIRTT”Y jt¸2ºfH2ƒF€Ãè˜aQDD„ÒÒÒ”••¥øøx¿ý<¨Y³fé³Ï>“$5J¿ÿýïýÎ_’:¤_|Q;vìPHHˆÆŒ£¬¬,uïÞ½Á:7gMs=Öãïÿ»^{í5íÙ³G;vTFF†,XàŒ¸Ú×:ÿÆ^ã–¾ÆmêWªO>ù¤Ú·o¯… J’fΜ©””=öØcW[ºR”_TTô®>ÍõÉ'ŸèÁl´èW]iýT~õq|ß­ý•µQd*ÉìÂd±X£ÐÐPY­VóNܱ±±fáªñ~5:XH2ïÈí¨0B’ü:\cQ ŠÛ•ïÏdøþìåËè¶'ɼÖÜn·ßÏhn·ÛüYÎ7Xf|íBðýÌ2:'\.—Yü_?¸`t\0:4øòz½r»Ý ‚ ¾ç¨Ruuu£A3£+QcƒúÝ|çQn¾ãÝlÿ[з£ƒï¶ú£Ãƒïvß®F Áø<–þn0:Bß7®Á¾ŠŒ‡Ão>Fg$#Da\ƒ‡ù<áááfÂèºd\[ìv»ÙÉèšd·ÛÍnKÆ6›M111 ®#¾×&·'!ÍTQQaÞ ªwWT©©©1ï~(¨ræÌmÍΖÛ÷×6Ë$ýXÒIÿ+i£¤ÓW¾e•T§+u×3Ÿ{N¯deÝ€³o@Eô6lМ9s´ÿ~;vL£FÒܹsõÐCÉjµ*;;[sçÎÕ§Ÿ~ª.]ºø—––¦yóæiРAÚ¾}»~ô£Éëõjâĉ4hžzê)ÅÆÆ*77W¿øÅ/ô¯ý«A8À÷ëk=®9çžžž®çŸ^©©©*--ÕŒ3ät:õ—¿üÅÜïØ±c=z´æÏŸ¯ñãÇKº¨˜7ož¶oß®ž={J’Ž?®Ñ£GkÞ¼y?~¼¬V«>þøc½ýöÛÊÉÉñ›[KÖ´¾ë±‹E‹/Ö”)StñâEÍž=[‹Eï½÷^ƒý£5ó4ns_–Œéëßÿþ·~ö³Ÿ™]oúöí« 6¨GW[’~ò“Ÿ¨C‡zùå—ÍmÝŽ;tß}÷µªÈ833Ó SDDD˜Å¸N§Ó,æ5Š`ÃÂÂÌ¢ÙÆ:b u|¯YÍá{-óUQQÑh»Ëåºj§0—Ëe´û𣤤¤YÈŒ®†™êÍÅ8ÚÚÚFÇ®­­õ &¸\®F‹ûÝn·ÙM ±ðƒ1–1^ ®FÁ 1 F‡«íÓTÇ›ºº:3DèßøÍ2¸VFx PèÖè˜ÐØ÷m6[À.<¾ê m4$hµZÍN?õçèt:l7BõÇ SHHˆ_€A’X°Z­æg®ÍfóëÞàp8Ìî¾á ãø˜˜˜Ý*ÂÃÃÕ¡C‡çS¿3 y„´1/¿ü²^ûŸÿÑeß_Ûµ{ßÖ~†Jº,)&<\N˜ CGŽ(::ZÛ¶mkPx×Eô^¯WUUU:vì˜Ö­[§7ÞxC+V¬PZZš¦L™¢þýûkÖ¬Y~Ç.]ºTûöíS–OÐÅb±hÛ¶mºï¾ûݯ˜»þ×zÜÕÚ¿þv›Í¦ªªªÁ—Ë¥èèhy<ž&÷klÌ–¬i}­]ÂÂBÍ™3Gü± ü‚Í „´fþÆmîëÑ’1}}ñÅzâ‰'´oß>IRŸ>}´qãF%''_uì@ªªª4mÚ4EEEéý÷ß¿æqn´*55UG«W¯VIII³­­­m“Ý€¶ŒÿÃÐÕx½ê” 
_<ÿ¼vîÜ©ó%%ÊzóM5ªAÑtçÎ%]é$q3KHHPII‰¼^oƒÇÕ ÿ}9Neffêøñã:q℞yæ­X±B“'O¾!Ç]/ f_çÎSBB‚ß~ ö+..ntÌk]ÓÖ®ÇÔ©S©þóŸr¹\æó¶ÄõzO|—/^¬‰'š_7N›7onÕ˜‘‘‘úÓŸþ¤µk×¶vz7ŒÇãÑ©S§Ô¯_?-^¼XÅÅÅÚ½{·222šÕ]û-G  ™={¶öìÙ£³.è™™>|¸,KÀý5bĽõÖ[ßá,¯¿ûï¿_Û·oo°ý³Ï>ÓàÁƒ›=ŽÅbÑ™3g$] <üðÃÚ°aƒ6mÚt]Žkêµh±cÇjÍš5 ¶ôÑG;v¬ùuzzz£Á€­[·6ØÖš5mízìܹSóçÏWRR’l6›$ qãz½'¾+þóŸµuëVÍœ9ÓÜ6sæL-Z´Heee³fÍ3ÆüÚb±(??¿Á~6›MQQQ×}Î×ËòåËUYY©xÀÜvÏ=÷hÍš5ª¬¬Ô¦M›4fÌ¿î2¾„-G  ILLÔàÁƒ[ÅfùꫯôüóÏë©§ž øï!--M[·nÕåË—µjÕ* 6L!!!ßñL€[ €[À˜1c´`Á=÷Üsm®Sˆl¹ZÀ%))I~ø¡V¬X¡;ï¼S:uÒ‚ ôæ›oj„ Ž×ؘ[¶lQxx¸~øÃ*&&FÇ—ËåÒûï¿ß䜚sœ$effêÑGUTT”žyæ™&C8ν±í½zõRvv¶>üðC%''«GZ½zµ²³³Õ³gOs¿äädeggkõêÕêÑ£‡ºwï®wß}WûÛߌÙÜ5mLk×cÙ²er¹\:t¨¢££•‘‘av¨¿ÆhÍü[ã–¼WÓb±(66V#GŽÔòåËõè£jÏž=êÕ«WƒãƯeË–iÕªUúÞ÷¾'§Ó©´´4=ðÀzúé§5tèP­[·N4ŠÐ‡ÖÔ©Su÷Ýw«¬¬L»víRVV–ìv{°§Ü–„Ü‚µdÉíÙ³G]ºtQFF†  %K–¨ªª*ØÓp“ðz½Úºu«222Ô¯_?ýç?ÿÑ| œœ 2$ØÓnk/·†¸åíß¿_™™™Z¹rßB{bIDAT¥ÂÂÂ4yòd=òÈ#JMM•ÕÊ=Cø;~ü¸V®\©åË—ëðáÃ5j”f̘¡ŒŒ >3€6‚@Àm¤¨¨HK—.Õ_ÿúWíß¿_;wÖ¤I“ôÈ#èÞ{ï•Åb öÉéÓ§µjÕ*­\¹R»wïVbb¢&Mš¤Ÿÿüç0`@°§ !·©ÜÜ\­\¹R+W®Ô‘#GÔ£GMœ8QéééúÁ~ ˆˆˆ`OÀ TWW§½{÷jÓ¦MZ¿~½vîÜ©ØØXeddèá‡Ö˜1cd³Ù‚=M€¾üòK­\¹R7nÔäp84räH¥§§+--M”Õj ö4´Ò™3g´yófmÚ´I[·nUaa¡:vì¨ôôtMš4Iééé ö44ø9wîœY0¾eË?^‰‰‰=z´FŒ¡aÆiРA7£Gj×®]Úµk—¶oß®ÜÜ\…‡‡+55Õ |õïß_‹%ØSÐBB×ëÕ¾}û´yófmß¾]»víRII‰ÂÂÂ4xð` 6LÇ×ðáÃÕ¹sç`O¸­UTTh÷îÝÊÉÉ1C EEEr84hRSS•––¦Q£F)<<<ØÓÐJBÐl^¯Wyyyf±yNNŽ<¨ÚÚZuéÒEƒVÿþýÕ¿ 0@={ö”Õj ö´[Îùóçµÿ~íÝ»Wû÷ï×¾}û”››«ÚÚZuíÚUÇ×°aÃ4lØ0 ƒ} À5©®®V~~¾òóóuòäI}ú¨W¯^æß{÷î-»Ýä³Ð–@й\.åååéèÑ£füÑ£G•ŸŸ¯sçΩ®®N’©¤¤$%%%™!‘¤¤$uíÚU]»vU‡(šGP\¸pA:uêTƒàG~~¾ Í}ãââ”””¤äädõîÝÛ/ijp3!€6ÍãñèôéÓúæ›otîÜ98qÂïqêÔ)ÕÔÔ˜ûÇÅÅ©S§NºãŽ;þÙµkW‚#h–K—.éܹsæû¯±?Ož<©ªª*óß÷`rr²ßãÎ;ïTlllÏÀ­‚@njGgÏžÕÙ³gUPPðÏââb¿ãÚµk§ÄÄD%$$(>>^ jß¾}ÀmQQQA:C\/n·[ÅÅÅ***Raa¡.\¸ ¢¢"©¸¸¸Á¶¢¢"¿°QDDD“A£N:©[·nŠŒŒ âY¸]ÀmÁår™‘ .¨  À Åÿ¾a€K—.ù*§Ói>âââë·Íétúm‹‰‰‘ÓéTXX˜"##-›Í¤¸¹•••Éív«ªªJª¬¬TYY™ÊËËÍGYY™ù¨¿½¼¼Ü¯‹‡$…„„(!!ÁïѾ}{¿¯;vìh>œNgÎ"4¢ªªÊ/$RRRâ2(--m<0ÂÕÕÕÇ QLLŒ"##åp8«°°0…‡‡û…GŒý$)<<\aaa’¤ØØXY,Ùív³kIdd¤BCC%]ébáp8>¿ï¾ÍUZZð{^¯Weee þîv»Íu¨¨¨PMMêêêT^^.éJ@çÒ¥Kº|ù²òp»Ýºxñ¢ªªªäv»UVVfîˆÝn÷ ãÄÅÅ éÄÅÅù…=âãã[´Ж®3Ç£òòr]¼xQååår»Ýª¬¬lzp¹\*//×¥K—är¹TZZj)Œ „$UVVÊãñ 
\´E+qqq’®t\‰ŒŒ”ÍfStt´¢¢¢äp8ät:ÍPK\\œ–‰•ÃáPdd¤bbbeî ·#!ÀMÎèÀ!IeeejêW~Muû$**Jv»=à÷£££e³Ù$ý·ƒ àÆ"p“±{h!7›¤ÁžšïÿˆASþ4IEND®B`‚patroni-3.2.2/docs/ha_multi_dc.rst000066400000000000000000000053361455170150700171400ustar00rootroot00000000000000.. _ha_multi_dc: =================== HA multi datacenter =================== The high availability of a PostgreSQL cluster deployed in multiple data centers is based on replication, which can be synchronous or asynchronous (`replication_modes `_). In both cases, it is important to be clear about the following concepts: - Postgres can run as primary or standby leader only when it owns the leading key and can update the leading key. - You should run the odd number of etcd, ZooKeeper or Consul nodes: 3 or 5! Synchronous Replication ----------------------- To have a multi DC cluster that can automatically tolerate a zone drop, a minimum of 3 is required. The architecture diagram would be the following: .. image:: _static/multi-dc-synchronous-replication.png We must deploy a cluster of etcd, ZooKeeper or Consul through the different DC, with a minimum of 3 nodes, one in each zone. Regarding postgres, we must deploy at least 2 nodes, in different DC. Then you have to set ``synchronous_mode: true`` in the global :ref:`dynamic configuration `. This enables sync replication and the primary node will choose one of the nodes as synchronous. Asynchronous Replication ------------------------ With only two data centers it would be better to have two independent etcd clusters and run Patroni :ref:`standby cluster ` in the second data center. If the first site is down, you can MANUALLY promote the ``standby_cluster``. The architecture diagram would be the following: .. image:: _static/multi-dc-asynchronous-replication.png Automatic promotion is not possible, because DC2 will never able to figure out the state of DC1. 
You should not use ``pg_ctl promote`` in this scenario, you need "manually promote" the healthy cluster by removing ``standby_cluster`` section from the :ref:`dynamic configuration `. .. warning:: If the source cluster is still up and running and you promote the standby cluster you create a split-brain. In case you want to return to the "initial" state, there are only two ways of resolving it: - Add the standby_cluster section back and it will trigger pg_rewind, but there are chances that pg_rewind will fail. - Rebuild the standby cluster from scratch. Before promoting standby cluster one have to manually ensure that the source cluster is down (STONITH). When DC1 recovers, the cluster has to be converted to a standby cluster. Before doing that you may manually examine the database and extract all changes that happened between the time when network between DC1 and DC2 has stopped working and the time when you manually stopped the cluster in DC1. Once extracted, you may also manually apply these changes to the cluster in DC2. patroni-3.2.2/docs/index.rst000066400000000000000000000042411455170150700157710ustar00rootroot00000000000000.. Patroni documentation master file, created by sphinx-quickstart on Mon Dec 19 16:54:09 2016. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Introduction ============ Patroni is a template for high availability (HA) PostgreSQL solutions using Python. For maximum accessibility, Patroni supports a variety of distributed configuration stores like `ZooKeeper `__, `etcd `__, `Consul `__ or `Kubernetes `__. Database engineers, DBAs, DevOps engineers, and SREs who are looking to quickly deploy HA PostgreSQL in datacenters — or anywhere else — will hopefully find it useful. We call Patroni a "template" because it is far from being a one-size-fits-all or plug-and-play replication system. It will have its own caveats. Use wisely. 
There are many ways to run high availability with PostgreSQL; for a list, see the `PostgreSQL Documentation `__. Currently supported PostgreSQL versions: 9.3 to 16. **Note to Citus users**: Starting from 3.0 Patroni nicely integrates with the `Citus `__ database extension to Postgres. Please check the :ref:`Citus support page ` in the Patroni documentation for more info about how to use Patroni high availability together with a Citus distributed cluster. **Note to Kubernetes users**: Patroni can run natively on top of Kubernetes. Take a look at the :ref:`Kubernetes ` chapter of the Patroni documentation. .. toctree:: :maxdepth: 2 :caption: Contents: README installation patroni_configuration rest_api patronictl replica_bootstrap replication_modes watchdog pause dcs_failsafe_mode kubernetes citus existing_data security ha_multi_dc faq releases CONTRIBUTING Indices and tables ================== .. ifconfig:: builder == 'html' * :ref:`genindex` * :ref:`modindex` * :ref:`search` .. ifconfig:: builder != 'html' * :ref:`genindex` * :ref:`search` patroni-3.2.2/docs/installation.rst000066400000000000000000000140231455170150700173620ustar00rootroot00000000000000.. _installation: Installation ============ Pre-requirements for Mac OS --------------------------- To install requirements on a Mac, run the following: .. code-block:: shell brew install postgresql etcd haproxy libyaml python .. _psycopg2_install_options: Psycopg ------- Starting from `psycopg2-2.8`_ the binary version of psycopg2 will no longer be installed by default. Installing it from the source code requires C compiler and postgres+python dev packages. Since in the python world it is not possible to specify dependency as ``psycopg2 OR psycopg2-binary`` you will have to decide how to install it. There are a few options available: 1. Use the package manager from your distro .. 
code-block:: shell sudo apt-get install python3-psycopg2 # install psycopg2 module on Debian/Ubuntu sudo yum install python3-psycopg2 # install psycopg2 on RedHat/Fedora/CentOS 2. Specify one of `psycopg`, `psycopg2`, or `psycopg2-binary` in the :ref:`list of dependencies ` when installing Patroni with pip. .. _extras: General installation for pip ---------------------------- Patroni can be installed with pip: .. code-block:: shell pip install patroni[dependencies] where ``dependencies`` can be either empty, or consist of one or more of the following: etcd or etcd3 `python-etcd` module in order to use Etcd as Distributed Configuration Store (DCS) consul `python-consul` module in order to use Consul as DCS zookeeper `kazoo` module in order to use Zookeeper as DCS exhibitor `kazoo` module in order to use Exhibitor as DCS (same dependencies as for Zookeeper) kubernetes `kubernetes` module in order to use Kubernetes as DCS in Patroni raft `pysyncobj` module in order to use python Raft implementation as DCS aws `boto3` in order to use AWS callbacks all all of the above (except psycopg family) psycopg `psycopg[binary]>=3.0.0` module psycopg2 `psycopg2>=2.5.4` module psycopg2-binary `psycopg2-binary` module For example, the command in order to install Patroni together with psycopg3, dependencies for Etcd as a DCS, and AWS callbacks is: .. code-block:: shell pip install patroni[psycopg3,etcd3,aws] Note that external tools to call in the replica creation or custom bootstrap scripts (i.e. WAL-E) should be installed independently of Patroni. .. _package_installation: Package installation on Linux ----------------------------- Patroni packages may be available for your operating system, produced by the Postgres community for: * RHEL, RockyLinux, AlmaLinux; * Debian and Ubuntu; * SUSE Enterprise Linux. You can also find packages for direct dependencies of Patroni, like python modules that might not be available in the official operating system repositories. 
For more information see the `PGDG repository`_ documentation. If you are on a RedHat Enterprise Linux derivative operating system you may also require packages from EPEL, see `EPEL repository`_ documentation. Once you have installed the PGDG repository for your OS you can install patroni. .. note:: Patroni packages are not maintained by the Patroni developers, but rather by the Postgres community. If you require support please first try connecting on `Postgres slack`_. Installing on Debian derivatives ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ With PGDG repo installed, see :ref:`above `, install Patroni via apt run: .. code-block:: shell apt-get install patroni Installing on RedHat derivatives ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ With PGDG repo installed, see :ref:`above `, install patroni with an etcd DCS via dnf on RHEL 9 (and derivatives) run: .. code-block:: shell dnf install patroni patroni-etcd You can install etcd from PGDG if your RedHat derivative distribution does not provide packages. On the nodes that will host the DCS run: .. code-block:: shell dnf install 'dnf-command(config-manager)' dnf config-manager --enable pgdg-rhel9-extras dnf install etcd You can replace the version of RHEL with `8` in the repo to make `pgdg-rhel8-extras` if needed. The repo name is still `pgdg-rhelN-extras` on RockyLinux, AlmaLinux, Oracle Linux, etc... Installing on SUSE Enterprise Linux ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ You might need to enable the SUSE PackageHub repositories for some dependencies. see `SUSE PackageHub`_ documentation. For SLES 15 with PGDG repo installed, see :ref:`above `, you can install patroni using: .. code-block:: shell zypper install patroni patroni-etcd With the SUSE PackageHub repo enabled you can also install etcd: .. code-block:: shell SUSEConnect -p PackageHub/15.5/x86_64 zypper install etcd Upgrading --------- Upgrading patroni is a very simple process, just update the software installation and restart the Patroni daemon on each node in the cluster. 
However, restarting the Patroni daemon will result in a Postgres database restart. In some situations this may cause a failover of the primary node in your cluster, therefore it is recommended to put the cluster into maintenance mode until the Patroni daemon restart has been completed. To put the cluster in maintenance mode, run the following command on one of the patroni nodes: .. code-block:: shell patronictl pause --wait Then on each node in the cluster, perform the package upgrade required for your OS: .. code-block:: shell apt-get update && apt-get install patroni patroni-etcd Restart the patroni daemon process on each node: .. code-block:: shell systemctl restart patroni Then finally resume monitoring of Postgres with patroni to take it out of maintenance mode: .. code-block:: shell patronictl resume --wait The cluster will now be full operational with the new version of Patroni. .. _psycopg2-2.8: http://initd.org/psycopg/articles/2019/04/04/psycopg-28-released/ .. _PGDG repository: https://www.postgresql.org/download/linux/ .. _EPEL repository: https://docs.fedoraproject.org/en-US/epel/ .. _SUSE PackageHub: https://packagehub.suse.com/how-to-use/ .. _Postgres slack: http://pgtreats.info/slack-invite patroni-3.2.2/docs/kubernetes.rst000066400000000000000000000103371455170150700170340ustar00rootroot00000000000000.. _kubernetes: Using Patroni with Kubernetes ============================= Patroni can use Kubernetes objects in order to store the state of the cluster and manage the leader key. That makes it capable of operating Postgres in Kubernetes environment without any consistency store, namely, one doesn't need to run an extra Etcd deployment. There are two different type of Kubernetes objects Patroni can use to store the leader and the configuration keys, they are configured with the `kubernetes.use_endpoints` or `PATRONI_KUBERNETES_USE_ENDPOINTS` environment variable. 
Use Endpoints ------------- Despite the fact that this is the recommended mode, it is turned off by default for compatibility reasons. When it is on, Patroni stores the cluster configuration and the leader key in the `metadata: annotations` fields of the respective `Endpoints` it creates. Changing the leader is safer than when using `ConfigMaps`, since both the annotations, containing the leader information, and the actual addresses pointing to the running leader pod are updated simultaneously in one go. Use ConfigMaps -------------- In this mode, Patroni will create ConfigMaps instead of Endpoints and store keys inside meta-data of those ConfigMaps. Changing the leader takes at least two updates, one to the leader ConfigMap and another to the respective Endpoint. To direct the traffic to the Postgres leader you need to configure the Kubernetes Postgres service to use the label selector with the `role_label` (configured in patroni configuration). Note that in some cases, for instance, when running on OpenShift, there is no alternative to using ConfigMaps. Configuration ------------- Patroni Kubernetes :ref:`settings ` and :ref:`environment variables ` are described in the general chapters of the documentation. .. _kubernetes_role_values: Customize role label ^^^^^^^^^^^^^^^^^^^^ By default, Patroni will set corresponding labels on the pod it runs in based on node's role, such as ``role=master``. The key and value of label can be customized by `kubernetes.role_label`, `kubernetes.leader_label_value`, `kubernetes.follower_label_value` and `kubernetes.standby_leader_label_value`. Note that if you migrate from default role labels to custom ones, you can reduce downtime by following migration steps: 1. Add a temporary label using original role value for the pod with `kubernetes.tmp_role_label` (like ``tmp_role``). Once pods are restarted they will get following labels set by Patroni: .. code:: YAML labels: cluster-name: foo role: master tmp_role: master 2. 
After all pods have been updated, modify the service selector to select the temporary label. .. code:: YAML selector: cluster-name: foo tmp_role: master 3. Add your custom role label (e.g., set `kubernetes.leader_label_value=primary`). Once pods are restarted they will get following new labels set by Patroni: .. code:: YAML labels: cluster-name: foo role: primary tmp_role: master 4. After all pods have been updated again, modify the service selector to use new role value. .. code:: YAML selector: cluster-name: foo role: primary 5. Finally, remove the temporary label from your configuration and update all pods. .. code:: YAML labels: cluster-name: foo role: primary Examples -------- - The `kubernetes `__ folder of the Patroni repository contains examples of the Docker image, and the Kubernetes manifest to test Patroni Kubernetes setup. Note that in the current state it will not be able to use PersistentVolumes because of permission issues. - You can find the full-featured Docker image that can use Persistent Volumes in the `Spilo Project `_. - There is also a `Helm chart `_ to deploy the Spilo image configured with Patroni running using Kubernetes. - In order to run your database clusters at scale using Patroni and Spilo, take a look at the `postgres-operator `_ project. It implements the operator pattern to manage Spilo clusters. patroni-3.2.2/docs/patroni_configuration.rst000066400000000000000000000353661455170150700213010ustar00rootroot00000000000000.. _patroni_configuration: Patroni configuration ===================== .. toctree:: :hidden: dynamic_configuration yaml_configuration ENVIRONMENT There are 3 types of Patroni configuration: - Global :ref:`dynamic configuration `. These options are stored in the DCS (Distributed Configuration Store) and applied on all cluster nodes. Dynamic configuration can be set at any time using :ref:`patronictl_edit_config` tool or Patroni :ref:`REST API `. 
If the options changed are not part of the startup configuration, they are applied asynchronously (upon the next wake up cycle) to every node, which gets subsequently reloaded. If the node requires a restart to apply the configuration (for `PostgreSQL parameters `__ with context postmaster, if their values have changed), a special flag ``pending_restart`` indicating this is set in the members.data JSON. Additionally, the node status indicates this by showing ``"restart_pending": true``. - Local :ref:`configuration file ` (patroni.yml). These options are defined in the configuration file and take precedence over dynamic configuration. ``patroni.yml`` can be changed and reloaded at runtime (without restart of Patroni) by sending SIGHUP to the Patroni process, performing ``POST /reload`` REST-API request or executing :ref:`patronictl_reload`. Local configuration can be either a single YAML file or a directory. When it is a directory, all YAML files in that directory are loaded one by one in sorted order. In case a key is defined in multiple files, the occurrence in the last file takes precedence. - :ref:`Environment configuration `. It is possible to set/override some of the "Local" configuration parameters with environment variables. Environment configuration is very useful when you are running in a dynamic environment and you don't know some of the parameters in advance (for example it's not possible to know your external IP address when you are running inside ``docker``). .. _important_configuration_rules: Important rules --------------- PostgreSQL parameters controlled by Patroni ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Some of the PostgreSQL parameters **must hold the same values on the primary and the replicas**. For those, **values set either in the local patroni configuration files or via the environment variables take no effect**. To alter or set their values one must change the shared configuration in the DCS. 
Below is the actual list of such parameters together with the default values: - **max_connections**: 100 - **max_locks_per_transaction**: 64 - **max_worker_processes**: 8 - **max_prepared_transactions**: 0 - **wal_level**: hot_standby - **track_commit_timestamp**: off For the parameters below, PostgreSQL does not require equal values among the primary and all the replicas. However, considering the possibility of a replica to become the primary at any time, it doesn't really make sense to set them differently; therefore, **Patroni restricts setting their values to the** :ref:`dynamic configuration `. - **max_wal_senders**: 5 - **max_replication_slots**: 5 - **wal_keep_segments**: 8 - **wal_keep_size**: 128MB These parameters are validated to ensure they are sane, or meet a minimum value. There are some other Postgres parameters controlled by Patroni: - **listen_addresses** - is set either from ``postgresql.listen`` or from ``PATRONI_POSTGRESQL_LISTEN`` environment variable - **port** - is set either from ``postgresql.listen`` or from ``PATRONI_POSTGRESQL_LISTEN`` environment variable - **cluster_name** - is set either from ``scope`` or from ``PATRONI_SCOPE`` environment variable - **hot_standby: on** - **wal_log_hints: on** - for Postgres 9.4 and newer. To be on the safe side parameters from the above lists are not written into ``postgresql.conf``, but passed as a list of arguments to the ``pg_ctl start`` which gives them the highest precedence, even above `ALTER SYSTEM `__ There also are some parameters like **postgresql.listen**, **postgresql.data_dir** that **can be set only locally**, i.e. in the Patroni :ref:`config file ` or via :ref:`configuration ` variable. In most cases the local configuration will override the dynamic configuration. When applying the local or dynamic configuration options, the following actions are taken: - The node first checks if there is a `postgresql.base.conf` file or if the ``custom_conf`` parameter is set. 
- If the ``custom_conf`` parameter is set, the file it specifies is used as the base configuration, ignoring `postgresql.base.conf` and `postgresql.conf`. - If the ``custom_conf`` parameter is not set and `postgresql.base.conf` exists, it contains the renamed "original" configuration and is used as the base configuration. - If there is no ``custom_conf`` nor `postgresql.base.conf`, the original `postgresql.conf` is renamed to `postgresql.base.conf` and used as the base configuration. - The dynamic options (with the exceptions above) are dumped into the `postgresql.conf` and an include is set in `postgresql.conf` to the base configuration (either `postgresql.base.conf` or the file at ``custom_conf``). Therefore, we would be able to apply new options without re-reading the configuration file to check if the include is present or not. - Some parameters that are essential for Patroni to manage the cluster are overridden using the command line. - If an option that requires restart is changed (we should look at the context in pg_settings and at the actual values of those options), a pending_restart flag is set on that node. This flag is reset on any restart. The parameters would be applied in the following order (run-time are given the highest priority): 1. load parameters from file `postgresql.base.conf` (or from a ``custom_conf`` file, if set) 2. load parameters from file `postgresql.conf` 3. load parameters from file `postgresql.auto.conf` 4. run-time parameter using `-o --name=value` This allows configuration for all the nodes (2), configuration for a specific node using ``ALTER SYSTEM`` (3) and ensures that parameters essential to the running of Patroni are enforced (4), as well as leaves room for configuration tools that manage `postgresql.conf` directly without involving Patroni (1). .. 
_shared_memory_gucs: PostgreSQL parameters that touch shared memory ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PostgreSQL has some parameters that determine the size of the shared memory used by them: - **max_connections** - **max_prepared_transactions** - **max_locks_per_transaction** - **max_wal_senders** - **max_worker_processes** Changing these parameters require a PostgreSQL restart to take effect, and their shared memory structures cannot be smaller on the standby nodes than on the primary node. As explained before, Patroni restrict changing their values through :ref:`dynamic configuration `, which usually consists of: 1. Applying changes through :ref:`patronictl_edit_config` (or via REST API ``/config`` endpoint) 2. Restarting nodes through :ref:`patronictl_restart` (or via REST API ``/restart`` endpoint) **Note:** please keep in mind that you should perform a restart of the PostgreSQL nodes through :ref:`patronictl_restart` command, or via REST API ``/restart`` endpoint. An attempt to restart PostgreSQL by restarting the Patroni daemon, e.g. by executing ``systemctl restart patroni``, can cause a failover to occur in the cluster, if you are restarting the primary node. However, as those settings manage shared memory, some extra care should be taken when restarting the nodes: * If you want to **increase** the value of any of those settings: 1. Restart all standbys first 2. Restart the primary after that * If you want to **decrease** the value of any of those settings: 1. Restart the primary first 2. Restart all standbys after that **Note:** if you attempt to restart all nodes in one go after **decreasing** the value of any of those settings, Patroni will ignore the change and restart the standby with the original setting value, thus requiring that you restart the standbys again later. 
Patroni does that to prevent the standby from entering an infinite crash loop, because PostgreSQL quits with a `FATAL` message if you attempt to set any of those parameters to a value lower than what is visible in ``pg_controldata`` on the Standby node. In other words, we can only decrease the setting on the standby once its ``pg_controldata`` is up-to-date with the primary with regard to these changes on the primary.
code:: text patroni --generate-sample-config [configfile] Description """"""""""" Generate a sample Patroni configuration file in ``yaml`` format. Parameter values are defined using the :ref:`Environment configuration `, otherwise, if not set, the defaults used in Patroni or the ``#FIXME`` string for the values that should be later defined by the user. Some default values are defined based on the local setup: - **postgresql.listen**: the IP address returned by ``gethostname`` call for the current machine's hostname and the standard ``5432`` port. - **postgresql.connect_address**: the IP address returned by ``gethostname`` call for the current machine's hostname and the standard ``5432`` port. - **postgresql.authentication.rewind**: is only defined if the PostgreSQL version can be defined from the binary and the version is 11 or later. - **restapi.listen**: IP address returned by ``gethostname`` call for the current machine's hostname and the standard ``8008`` port. - **restapi.connect_address**: IP address returned by ``gethostname`` call for the current machine's hostname and the standard ``8008`` port. Parameters """""""""" ``configfile`` - full path to the configuration file used to store the result. If not provided, the result is sent to ``stdout``. .. _generate_config: Patroni configuration for a running instance ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code:: text patroni --generate-config [--dsn DSN] [configfile] Description """"""""""" Generate a Patroni configuration in ``yaml`` format for the locally running PostgreSQL instance. Either the provided DSN (takes precedence) or PostgreSQL `environment variables `__ will be used for the PostgreSQL connection. If the password is not provided, it should be entered via prompt. 
All the non-internal GUCs defined in the source Postgres instance, regardless of whether they were set through a configuration file, through the postmaster command-line, or through environment variables, will be used as the source for the following Patroni configuration parameters:
Parameters """""""""" ``configfile`` Full path to the configuration file used to store the result. If not provided, result is sent to ``stdout``. ``dsn`` Optional DSN string for the local PostgreSQL instance to get GUC values from. Validate Patroni configuration ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code:: text patroni --validate-config [configfile] Description """"""""""" Validate the given Patroni configuration and print the information about the failed checks. Parameters """""""""" ``configfile`` Full path to the configuration file to check. If not given or file does not exist, will try to read from the ``PATRONI_CONFIG_VARIABLE`` environment variable or, if not set, from the :ref:`Patroni environment variables `. patroni-3.2.2/docs/patronictl.rst000066400000000000000000001670421455170150700170520ustar00rootroot00000000000000.. _patronictl: patronictl ========== Patroni has a command-line interface named ``patronictl``, which is used basically to interact with Patroni's REST API and with the DCS. It is intended to make it easier to perform operations in the cluster, and can easily be used by humans or scripts. .. _patronictl_configuration: Configuration ------------- ``patronictl`` uses 3 sections of the configuration: - **ctl**: how to authenticate against the Patroni REST API, and how to validate the server identity. Refer to :ref:`ctl settings ` for more details; - **restapi**: how to authenticate against the Patroni REST API, and how to validate the server identity. Only used if ``ctl`` configuration is not enough. ``patronictl`` is mainly interested in ``restapi.authentication`` section (in case ``ctl.authentication`` is missing) and ``restapi.cafile`` setting (in case ``ctl.cacert`` is missing). Refer to :ref:`REST API settings ` for more details; - DCS (e.g. **etcd**): how to contact and authenticate against the DCS used by Patroni. Those configuration options can come either from environment variables or from a configuration file. 
Look for the above sections in :ref:`Environment Configuration Settings ` or :ref:`YAML Configuration Settings ` to understand how you can set the options for them through environment variables or through a configuration file. If you opt for using environment variables, it's a straight forward approach. Patronictl will read the environment variables and use their values. If you opt for using a configuration file, you have different ways to inform ``patronictl`` about the file to be used. By default ``patronictl`` will attempt to load a configuration file named ``patronictl.yaml``, which is expected to be found under either of these paths, according to your system: - Mac OS X: ``~/Library/Application Support/patroni`` - Mac OS X (POSIX): ``~/.patroni`` - Unix: ``~/.config/patroni`` - Unix (POSIX): ``~/.patroni`` - Windows (roaming): ``C:\Users\\AppData\Roaming\patroni`` - Windows (not roaming): ``C:\Users\\AppData\Local\patroni`` You can override that behavior either by: - Setting the environment variable ``PATRONICTL_CONFIG_FILE`` with the path to a custom configuration file; - Using the ``-c`` / ``--config-file`` command-line argument of ``patronictl`` with the path to a custom configuration file. .. note:: If you are running ``patronictl`` in the same host as ``patroni`` daemon is running, you may just use the same configuration file if it contains all the configuration sections required by ``patronictl``. .. _patronictl_usage: Usage ----- ``patronictl`` exposes several handy operations. This section is intended to describe each of them. Before jumping into each of the sub-commands of ``patronictl``, be aware that ``patronictl`` itself has the following command-line arguments: ``-c`` / ``--config-file`` As explained before, used to provide a path to a configuration file for ``patronictl``. ``-d`` / ``--dcs-url`` / ``--dcs`` Provide a connection string to the DCS used by Patroni. 
This argument can be used either to override the DCS and ``namespace`` settings from the ``patronictl`` configuration, or to define it if it's missing in the configuration. The value should be in the format ``DCS://HOST:PORT/NAMESPACE``, e.g. ``etcd3://localhost:2379/service`` to connect to etcd v3 running on ``localhost`` with Patroni cluster stored under ``service`` namespace. Any part that is missing in the argument value will be replaced with the value present in the configuration or with its default. ``-k`` / ``--insecure`` Flag to bypass validation of REST API server SSL certificate. This is the synopsis for running a command from the ``patronictl``: .. code:: text patronictl [ { -c | --config-file } CONFIG_FILE ] [ { -d | --dcs-url | --dcs } DCS_URL ] [ { -k | --insecure } ] SUBCOMMAND .. note:: This is the syntax for the synopsis: - Options between square brackets are optional; - Options between curly brackets represent a "choose one of set" operation; - Options with ``[, ... ]`` can be specified multiple times; - Things written in uppercase represent a literal that should be given a value to. We will use this same syntax when describing ``patronictl`` sub-commands in the following sub-sections. Also, when describing sub-commands in the following sub-sections, the commands' synposis should be seen as a replacement for the ``SUBCOMMAND`` in the above synopsis. In the following sub-sections you can find a description of each command implemented by ``patronictl``. For sake of example, we will use the configuration files present in the GitHub repository of Patroni (files ``postgres0.yml``, ``postgres1.yml`` and ``postgres2.yml``). .. _patronictl_dsn: patronictl dsn ^^^^^^^^^^^^^^ .. _patronictl_dsn_synopsis: Synopsis """""""" .. code:: text dsn [ CLUSTER_NAME ] [ { { -r | --role } { leader | primary | standby-leader | replica | standby | any } | { -m | --member } MEMBER_NAME } ] [ --group CITUS_GROUP ] .. 
_patronictl_dsn_description: Description """"""""""" ``patronictl dsn`` gets the connection string for one member of the Patroni cluster. If multiple members match the parameters of this command, one of them will be chosen, prioritizing the primary node. .. _patronictl_dsn_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. If not given, ``patronictl`` will attempt to fetch that from the ``scope`` configuration, if it exists. ``-r`` / ``--role`` Choose a member that has the given role. Role can be one of: - ``leader``: the leader of either a regular Patroni cluster or a standby Patroni cluster; or - ``primary``: the leader of a regular Patroni cluster; or - ``standby-leader``: the leader of a standby Patroni cluster; or - ``replica``: a replica of a Patroni cluster; or - ``standby``: same as ``replica``; or - ``any``: any role. Same as omitting this parameter; or ``-m`` / ``--member`` Choose a member of the cluster with the given name. ``MEMBER_NAME`` is the name of the member. ``--group`` Choose a member that is part of the given Citus group. ``CITUS_GROUP`` is the ID of the Citus group. .. _patronictl_dsn_examples: Examples """""""" Get DSN of the primary node: .. code:: bash $ patronictl -c postgres0.yml dsn batman -r primary host=127.0.0.1 port=5432 Get DSN of the node named ``postgresql1``: .. code:: bash $ patronictl -c postgres0.yml dsn batman --member postgresql1 host=127.0.0.1 port=5433 .. _patronictl_edit_config: patronictl edit-config ^^^^^^^^^^^^^^^^^^^^^^ .. _patronictl_edit_config_synopsis: Synopsis """""""" .. code:: text edit-config [ CLUSTER_NAME ] [ --group CITUS_GROUP ] [ { -q | --quiet } ] [ { -s | --set } CONFIG="VALUE" [, ... ] ] [ { -p | --pg } PG_CONFIG="PG_VALUE" [, ... ] ] [ { --apply | --replace } CONFIG_FILE ] [ --force ] .. _patronictl_edit_config_description: Description """"""""""" ``patronictl edit-config`` changes the dynamic configuration of the cluster and updates the DCS with that. .. 
note:: When invoked through a TTY the command attempts to show a diff of the dynamic configuration through a pager. By default, it attempts to use either ``less`` or ``more``. If you want a different pager, set the ``PAGER`` environment variable with the desired one. .. _patronictl_edit_config_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. If not given, ``patronictl`` will attempt to fetch that from the ``scope`` configuration, if it exists. ``--group`` Change dynamic configuration of the given Citus group. If not given, ``patronictl`` will attempt to fetch that from the ``citus.group`` configuration, if it exists. ``CITUS_GROUP`` is the ID of the Citus group. ``-q`` / ``--quiet`` Flag to skip showing the configuration diff. ``-s`` / ``--set`` Set a given dynamic configuration option with a given value. ``CONFIG`` is the name of the dynamic configuration path in the YAML tree, with levels joined by ``.`` . ``VALUE`` is the value for ``CONFIG``. If it is ``null``, then ``CONFIG`` will be removed from the dynamic configuration. ``-p`` / ``--pg`` Set a given dynamic Postgres configuration option with the given value. It is essentially a shorthand for ``--s`` / ``--set`` with ``CONFIG`` prepended with ``postgresql.parameters.``. ``PG_CONFIG`` is the name of the Postgres configuration to be set. ``PG_VALUE`` is the value for ``PG_CONFIG``. If it is ``nulll``, then ``PG_CONFIG`` will be removed from the dynamic configuration. ``--apply`` Apply dynamic configuration from the given file. It is similar to specifying multiple ``-s`` / ``--set`` options, one for each configuration from ``CONFIG_FILE``. ``CONFIG_FILE`` is the path to a file containing the dynamic configuration to be applied, in YAML format. Use ``-`` if you want to read from ``stdin``. ``--replace`` Replace the dynamic configuration in the DCS with the dynamic configuration specified in the given file. 
``CONFIG_FILE`` is the path to a file containing the new dynamic configuration to take effect, in YAML format. Use ``-`` if you want to read from ``stdin``. ``--force`` Flag to skip confirmation prompts when changing the dynamic configuration. Useful for scripts. .. _patronictl_edit_config_examples: Examples """""""" Change ``max_connections`` Postgres GUC: .. code:: diff patronictl -c postgres0.yml edit-config batman --pg max_connections="150" --force --- +++ @@ -1,6 +1,8 @@ loop_wait: 10 maximum_lag_on_failover: 1048576 postgresql: + parameters: + max_connections: 150 pg_hba: - host replication replicator 127.0.0.1/32 md5 - host all all 0.0.0.0/0 md5 Configuration changed Change ``loop_wait`` and ``ttl`` settings: .. code:: diff patronictl -c postgres0.yml edit-config batman --set loop_wait="15" --set ttl="45" --force --- +++ @@ -1,4 +1,4 @@ -loop_wait: 10 +loop_wait: 15 maximum_lag_on_failover: 1048576 postgresql: pg_hba: @@ -6,4 +6,4 @@ - host all all 0.0.0.0/0 md5 use_pg_rewind: true retry_timeout: 10 -ttl: 30 +ttl: 45 Configuration changed Remove ``maximum_lag_on_failover`` setting from dynamic configuration: .. code:: diff patronictl -c postgres0.yml edit-config batman --set maximum_lag_on_failover="null" --force --- +++ @@ -1,5 +1,4 @@ loop_wait: 10 -maximum_lag_on_failover: 1048576 postgresql: pg_hba: - host replication replicator 127.0.0.1/32 md5 Configuration changed .. _patronictl_failover: patronictl failover ^^^^^^^^^^^^^^^^^^^ .. _patronictl_failover_synopsis: Synopsis """""""" .. code:: text failover [ CLUSTER_NAME ] [ --group CITUS_GROUP ] [ { --leader | --primary } LEADER_NAME ] --candidate CANDIDATE_NAME [ --force ] .. _patronictl_failover_description: Description """"""""""" ``patronictl failover`` performs a manual failover in the cluster. It is designed to be used when the cluster is not healthy, e.g.: - There is no leader; or - There is no synchronous standby available in a synchronous cluster. 
It also allows failing over to an asynchronous node if synchronous mode is enabled.
code:: bash $ patronictl -c postgres0.yml failover batman --candidate postgresql2 --force Current cluster topology + Cluster: batman (7277694203142172922) -+-----------+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +-------------+----------------+---------+-----------+----+-----------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 3 | | | postgresql1 | 127.0.0.1:5433 | Replica | streaming | 3 | 0 | | postgresql2 | 127.0.0.1:5434 | Replica | streaming | 3 | 0 | +-------------+----------------+---------+-----------+----+-----------+ 2023-09-12 11:52:27.50978 Successfully failed over to "postgresql2" + Cluster: batman (7277694203142172922) -+---------+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +-------------+----------------+---------+---------+----+-----------+ | postgresql0 | 127.0.0.1:5432 | Replica | stopped | | unknown | | postgresql1 | 127.0.0.1:5433 | Replica | running | 3 | 0 | | postgresql2 | 127.0.0.1:5434 | Leader | running | 3 | | +-------------+----------------+---------+---------+----+-----------+ .. _patronictl_flush: patronictl flush ^^^^^^^^^^^^^^^^ .. _patronictl_flush_synopsis: Synopsis """""""" .. code:: text flush CLUSTER_NAME [ MEMBER_NAME [, ... ] ] { restart | switchover } [ --group CITUS_GROUP ] [ { -r | --role } { leader | primary | standby-leader | replica | standby | any } ] [ --force ] .. _patronictl_flush_description: Description """"""""""" ``patronictl flush`` discards scheduled events, if any. .. _patronictl_flush_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. ``MEMBER_NAME`` Discard scheduled events for the given Patroni member(s). Multiple members can be specified. If no members are specified, all of them are considered. .. note:: Only used if discarding scheduled restart events. ``restart`` Discard scheduled restart events. ``switchover`` Discard scheduled switchover event. ``--group`` Discard scheduled events from the given Citus group. 
``CITUS_GROUP`` is the ID of the Citus group. ``-r`` / ``--role`` Discard scheduled events for members that have the given role. Role can be one of: - ``leader``: the leader of either a regular Patroni cluster or a standby Patroni cluster; or - ``primary``: the leader of a regular Patroni cluster; or - ``standby-leader``: the leader of a standby Patroni cluster; or - ``replica``: a replica of a Patroni cluster; or - ``standby``: same as ``replica``; or - ``any``: any role. Same as omitting this parameter. .. note:: Only used if discarding scheduled restart events. ``--force`` Flag to skip confirmation prompts when performing the flush. Useful for scripts. .. _patronictl_flush_examples: Examples """""""" Discard a scheduled switchover event: .. code:: bash $ patronictl -c postgres0.yml flush batman switchover --force Success: scheduled switchover deleted Discard scheduled restart of all standby nodes: .. code:: bash $ patronictl -c postgres0.yml flush batman restart -r replica --force + Cluster: batman (7277694203142172922) -+-----------+----+-----------+---------------------------+ | Member | Host | Role | State | TL | Lag in MB | Scheduled restart | +-------------+----------------+---------+-----------+----+-----------+---------------------------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 5 | | 2023-09-12T17:17:00+00:00 | | postgresql1 | 127.0.0.1:5433 | Replica | streaming | 5 | 0 | 2023-09-12T17:17:00+00:00 | | postgresql2 | 127.0.0.1:5434 | Replica | streaming | 5 | 0 | 2023-09-12T17:17:00+00:00 | +-------------+----------------+---------+-----------+----+-----------+---------------------------+ Success: flush scheduled restart for member postgresql1 Success: flush scheduled restart for member postgresql2 Discard scheduled restart of nodes ``postgresql0`` and ``postgresql1``: .. 
code:: bash $ patronictl -c postgres0.yml flush batman postgresql0 postgresql1 restart --force + Cluster: batman (7277694203142172922) -+-----------+----+-----------+---------------------------+ | Member | Host | Role | State | TL | Lag in MB | Scheduled restart | +-------------+----------------+---------+-----------+----+-----------+---------------------------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 5 | | 2023-09-12T17:17:00+00:00 | | postgresql1 | 127.0.0.1:5433 | Replica | streaming | 5 | 0 | 2023-09-12T17:17:00+00:00 | | postgresql2 | 127.0.0.1:5434 | Replica | streaming | 5 | 0 | 2023-09-12T17:17:00+00:00 | +-------------+----------------+---------+-----------+----+-----------+---------------------------+ Success: flush scheduled restart for member postgresql0 Success: flush scheduled restart for member postgresql1 .. _patronictl_history: patronictl history ^^^^^^^^^^^^^^^^^^ .. _patronictl_history_synopsis: Synopsis """""""" .. code:: text history [ CLUSTER_NAME ] [ --group CITUS_GROUP ] [ { -f | --format } { pretty | tsv | json | yaml } ] .. _patronictl_history_description: Description """"""""""" ``patronictl history`` shows a history of failover and switchover events from the cluster, if any. The following information is included in the output: ``TL`` Postgres timeline at which the event occurred. ``LSN`` Postgres LSN at which the event occurred. ``Reason`` Reason fetched from the Postgres ``.history`` file. ``Timestamp`` Time when the event occurred. ``New Leader`` Patroni member that has been promoted during the event. .. _patronictl_history_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. If not given, ``patronictl`` will attempt to fetch that from the ``scope`` configuration, if it exists. ``--group`` Show history of events from the given Citus group. ``CITUS_GROUP`` is the ID of the Citus group. If not given, ``patronictl`` will attempt to fetch that from the ``citus.group`` configuration, if it exists. 
``-f`` / ``--format`` How to format the list of events in the output. Format can be one of: - ``pretty``: prints history as a pretty table; or - ``tsv``: prints history as tabular information, with columns delimited by ``\t``; or - ``json``: prints history in JSON format; or - ``yaml``: prints history in YAML format. The default is ``pretty``. ``--force`` Flag to skip confirmation prompts when performing the flush. Useful for scripts. .. _patronictl_history_examples: Examples """""""" Show the history of events: .. code:: bash $ patronictl -c postgres0.yml history batman +----+----------+------------------------------+----------------------------------+-------------+ | TL | LSN | Reason | Timestamp | New Leader | +----+----------+------------------------------+----------------------------------+-------------+ | 1 | 24392648 | no recovery target specified | 2023-09-11T22:11:27.125527+00:00 | postgresql0 | | 2 | 50331864 | no recovery target specified | 2023-09-12T11:34:03.148097+00:00 | postgresql0 | | 3 | 83886704 | no recovery target specified | 2023-09-12T11:52:26.948134+00:00 | postgresql2 | | 4 | 83887280 | no recovery target specified | 2023-09-12T11:53:09.620136+00:00 | postgresql0 | +----+----------+------------------------------+----------------------------------+-------------+ Show the history of events in YAML format: .. code:: bash $ patronictl -c postgres0.yml history batman -f yaml - LSN: 24392648 New Leader: postgresql0 Reason: no recovery target specified TL: 1 Timestamp: '2023-09-11T22:11:27.125527+00:00' - LSN: 50331864 New Leader: postgresql0 Reason: no recovery target specified TL: 2 Timestamp: '2023-09-12T11:34:03.148097+00:00' - LSN: 83886704 New Leader: postgresql2 Reason: no recovery target specified TL: 3 Timestamp: '2023-09-12T11:52:26.948134+00:00' - LSN: 83887280 New Leader: postgresql0 Reason: no recovery target specified TL: 4 Timestamp: '2023-09-12T11:53:09.620136+00:00' .. _patronictl_list: patronictl list ^^^^^^^^^^^^^^^ .. 
_patronictl_list_synopsis: Synopsis """""""" .. code:: text list [ CLUSTER_NAME [, ... ] ] [ --group CITUS_GROUP ] [ { -e | --extended } ] [ { -t | --timestamp } ] [ { -f | --format } { pretty | tsv | json | yaml } ] [ { -W | { -w | --watch } TIME } ] .. _patronictl_list_description: Description """"""""""" ``patronictl list`` shows information about Patroni cluster and its members. The following information is included in the output: ``Cluster`` Name of the Patroni cluster. ``Member`` Name of the Patroni member. ``Host`` Host where the member is located. ``Role`` Current role of the member. Can be one among: * ``Leader``: the current leader of a regular Patroni cluster; or * ``Standby Leader``: the current leader of a Patroni standby cluster; or * ``Sync Standby``: a synchronous standby of a Patroni cluster with synchronous mode enabled; or * ``Replica``: a regular standby of a Patroni cluster. ``State`` Current state of Postgres in the Patroni member. Some examples among the possible states: * ``running``: if Postgres is currently up and running; * ``streaming``: if a replica and Postgres is currently streaming WALs from the primary node; * ``in archive recovery``: if a replica and Postgres is currently fetching WALs from the archive; * ``stopped``: if Postgres had been shut down; * ``crashed``: if Postgres has crashed. ``TL`` Current Postgres timeline in the Patroni member. ``Lag in MB`` Amount worth of replication lag in megabytes between the Patroni member and its upstream. Besides that, the following information may be included in the output: ``System identifier`` Postgres system identifier. .. note:: Shown in the table header. Only shown if output format is ``pretty``. ``Group`` Citus group ID. .. note:: Shown in the table header. Only shown if a Citus cluster. ``Pending restart`` ``*`` indicates that the node needs a restart for some Postgres configuration to take effect. An empty value indicates the node does not require a restart. .. 
note:: Shown as a member attribute. Shown if: - Printing in ``pretty`` or ``tsv`` format and with extended output enabled; or - If node requires a restart. ``Scheduled restart`` Timestamp at which a restart has been scheduled for the Postgres instance managed by the Patroni member. An empty value indicates there is no scheduled restart for the member. .. note:: Shown as a member attribute. Shown if: - Printing in ``pretty`` or ``tsv`` format and with extended output enabled; or - If node has a scheduled restart. ``Tags`` Contains tags set for the Patroni member. An empty value indicates that either no tags have been configured, or that they have been configured with default values. .. note:: Shown as a member attribute. Shown if: - Printing in ``pretty`` or ``tsv`` format and with extended output enabled; or - If node has any custom tags, or any default tags with non-default values. ``Scheduled switchover`` Timestamp at which a switchover has been scheduled for the Patroni cluster, if any. .. note:: Shown in the table footer. Only shown if there is a scheduled switchover, and output format is ``pretty``. ``Maintenance mode`` If the cluster monitoring is currently paused. .. note:: Shown in the table footer. Only shown if the cluster is paused, and output format is ``pretty``. .. _patronictl_list_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. If not given, ``patronictl`` will attempt to fetch that from the ``scope`` configuration, if it exists. ``--group`` Show information about members from the given Citus group. ``CITUS_GROUP`` is the ID of the Citus group. ``-e`` / ``--extended`` Show extended information. Force showing ``Pending restart``, ``Scheduled restart`` and ``Tags`` attributes, even if their value is empty. .. note:: Only applies to ``pretty`` and ``tsv`` output formats. ``-t`` / ``--timestamp`` Print timestamp before printing information about the cluster and its members. 
``-f`` / ``--format`` How to format the cluster information in the output. Format can be one of: - ``pretty``: prints the information as a pretty table; or - ``tsv``: prints the information as tabular data, with columns delimited by ``\t``; or - ``json``: prints the information in JSON format; or - ``yaml``: prints the information in YAML format. The default is ``pretty``.
code:: bash $ patronictl -c postgres0.yml list batman -e + Cluster: batman (7277694203142172922) -+-----------+----+-----------+-----------------+-------------------+------+ | Member | Host | Role | State | TL | Lag in MB | Pending restart | Scheduled restart | Tags | +-------------+----------------+---------+-----------+----+-----------+-----------------+-------------------+------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 5 | | | | | | postgresql1 | 127.0.0.1:5433 | Replica | streaming | 5 | 0 | | | | | postgresql2 | 127.0.0.1:5434 | Replica | streaming | 5 | 0 | | | | +-------------+----------------+---------+-----------+----+-----------+-----------------+-------------------+------+ Show information about the cluster in YAML format, with timestamp of execution: .. code:: bash $ patronictl -c postgres0.yml list batman -f yaml -t 2023-09-12 13:30:48 - Cluster: batman Host: 127.0.0.1:5432 Member: postgresql0 Role: Leader State: running TL: 5 - Cluster: batman Host: 127.0.0.1:5433 Lag in MB: 0 Member: postgresql1 Role: Replica State: streaming TL: 5 - Cluster: batman Host: 127.0.0.1:5434 Lag in MB: 0 Member: postgresql2 Role: Replica State: streaming TL: 5 .. _patronictl_pause: patronictl pause ^^^^^^^^^^^^^^^^ .. _patronictl_pause_synopsis: Synopsis """""""" .. code:: text pause [ CLUSTER_NAME ] [ --group CITUS_GROUP ] [ --wait ] .. _patronictl_pause_description: Description """"""""""" ``patronictl pause`` temporarily puts the Patroni cluster in maintenance mode and disables automatic failover. .. _patronictl_pause_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. If not given, ``patronictl`` will attempt to fetch that from the ``scope`` configuration, if it exists. ``--group`` Pause the given Citus group. ``CITUS_GROUP`` is the ID of the Citus group. If not given, ``patronictl`` will attempt to fetch that from the ``citus.group`` configuration, if it exists. 
``--wait`` Wait until all Patroni members are paused before returning control to the caller. .. _patronictl_pause_examples: Examples """""""" Put the cluster in maintenance mode, and wait until all nodes have been paused: .. code:: bash $ patronictl -c postgres0.yml pause batman --wait 'pause' request sent, waiting until it is recognized by all nodes Success: cluster management is paused .. _patronictl_query: patronictl query ^^^^^^^^^^^^^^^^ .. _patronictl_query_synopsis: Synopsis """""""" .. code:: text query [ CLUSTER_NAME ] [ --group CITUS_GROUP ] [ { { -r | --role } { leader | primary | standby-leader | replica | standby | any } | { -m | --member } MEMBER_NAME } ] [ { -d | --dbname } DBNAME ] [ { -U | --username } USERNAME ] [ --password ] [ --format { pretty | tsv | json | yaml } ] [ { { -f | --file } FILE_NAME | { -c | --command } SQL_COMMAND } ] [ --delimiter ] [ { -W | { -w | --watch } TIME } ] .. _patronictl_query_description: Description """"""""""" ``patronictl query`` executes a SQL command or script against a member of the Patroni cluster. .. _patronictl_query_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. If not given, ``patronictl`` will attempt to fetch that from the ``scope`` configuration, if it exists. ``--group`` Query the given Citus group. ``CITUS_GROUP`` is the ID of the Citus group. ``-r`` / ``--role`` Choose a member that has the given role. Role can be one of: - ``leader``: the leader of either a regular Patroni cluster or a standby Patroni cluster; or - ``primary``: the leader of a regular Patroni cluster; or - ``standby-leader``: the leader of a standby Patroni cluster; or - ``replica``: a replica of a Patroni cluster; or - ``standby``: same as ``replica``; or - ``any``: any role. Same as omitting this parameter. ``-m`` / ``--member`` Choose a member that has the given name. ``MEMBER_NAME`` is the name of the member to be picked. ``-d`` / ``--dbname`` Database to connect and run the query. 
``DBNAME`` is the name of the database. If not given, defaults to ``USERNAME``. ``-U`` / ``--username`` User to connect to the database. ``USERNAME`` name of the user. If not given, defaults to the operating system user running ``patronictl query``. ``--password`` Prompt for the password of the connecting user. As Patroni uses ``libpq``, alternatively you can create a ``~/.pgpass`` file or set the ``PGPASSWORD`` environment variable. ``--format`` How to format the output of the query. Format can be one of: - ``pretty``: prints query output as a pretty table; or - ``tsv``: prints query output as tabular information, with columns delimited by ``\t``; or - ``json``: prints query output in JSON format; or - ``yaml``: prints query output in YAML format. The default is ``tsv``. ``-f`` / ``--file`` Use a file as source of commands to run queries. ``FILE_NAME`` is the path to the source file. ``-c`` / ``--command`` Run the given SQL command in the query. ``SQL_COMMAND`` is the SQL command to be executed. ``--delimiter`` The delimiter when printing information in ``tsv`` format, or ``\t`` if omitted. ``-W`` Automatically re-run the query every 2 seconds. ``-w`` / ``--watch`` Automatically re-run the query at the specified interval. ``TIME`` is the interval between re-runs, in seconds. .. _patronictl_query_examples: Examples """""""" Run a SQL command as ``postgres`` user, and ask for its password: .. code:: bash $ patronictl -c postgres0.yml query batman -U postgres --password -c "SELECT now()" Password: now 2023-09-12 18:10:53.228084+00:00 Run a SQL command as ``postgres`` user, and take password from ``libpq`` environment variable: .. code:: bash $ PGPASSWORD=zalando patronictl -c postgres0.yml query batman -U postgres -c "SELECT now()" now 2023-09-12 18:11:37.639500+00:00 Run a SQL command and print in ``pretty`` format every 2 seconds: .. 
code:: bash $ patronictl -c postgres0.yml query batman -c "SELECT now()" --format pretty -W +----------------------------------+ | now | +----------------------------------+ | 2023-09-12 18:12:16.716235+00:00 | +----------------------------------+ +----------------------------------+ | now | +----------------------------------+ | 2023-09-12 18:12:18.732645+00:00 | +----------------------------------+ +----------------------------------+ | now | +----------------------------------+ | 2023-09-12 18:12:20.750573+00:00 | +----------------------------------+ Run a SQL command on database ``test`` and print the output in YAML format: .. code:: bash $ patronictl -c postgres0.yml query batman -d test -c "SELECT now() AS column_1, 'test' AS column_2" --format yaml - column_1: 2023-09-12 18:14:22.052060+00:00 column_2: test Run a SQL command on member ``postgresql2``: .. code:: bash $ patronictl -c postgres0.yml query batman -m postgresql2 -c "SHOW port" port 5434 Run a SQL command on any of the standbys: .. code:: bash $ patronictl -c postgres0.yml query batman -r replica -c "SHOW port" port 5433 .. _patronictl_reinit: patronictl reinit ^^^^^^^^^^^^^^^^^ .. _patronictl_reinit_synopsis: Synopsis """""""" .. code:: text reinit CLUSTER_NAME [ MEMBER_NAME [, ... ] ] [ --group CITUS_GROUP ] [ --wait ] [ --force ] .. _patronictl_reinit_description: Description """"""""""" ``patronictl reinit`` rebuilds a Postgres standby instance managed by a replica member of the Patroni cluster. .. _patronictl_reinit_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. ``MEMBER_NAME`` Name of the replica member for which the Postgres instance will be rebuilt. Multiple replica members can be specified. If no members are specified, the command does nothing. ``--group`` Rebuild a replica member of the given Citus group. ``CITUS_GROUP`` is the ID of the Citus group. ``--wait`` Wait until the reinitialization of the Postgres standby node(s) is finished. 
``--force`` Flag to skip confirmation prompts when rebuilding Postgres standby instances. Useful for scripts. .. _patronictl_reinit_examples: Examples """""""" Request a rebuild of all replica members of the Patroni cluster and immediately return control to the caller: .. code:: bash $ patronictl -c postgres0.yml reinit batman postgresql1 postgresql2 --force + Cluster: batman (7277694203142172922) -+-----------+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +-------------+----------------+---------+-----------+----+-----------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 5 | | | postgresql1 | 127.0.0.1:5433 | Replica | streaming | 5 | 0 | | postgresql2 | 127.0.0.1:5434 | Replica | streaming | 5 | 0 | +-------------+----------------+---------+-----------+----+-----------+ Success: reinitialize for member postgresql1 Success: reinitialize for member postgresql2 Request a rebuild of ``postgresql2`` and wait for it to complete: .. code:: bash $ patronictl -c postgres0.yml reinit batman postgresql2 --wait --force + Cluster: batman (7277694203142172922) -+-----------+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +-------------+----------------+---------+-----------+----+-----------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 5 | | | postgresql1 | 127.0.0.1:5433 | Replica | streaming | 5 | 0 | | postgresql2 | 127.0.0.1:5434 | Replica | streaming | 5 | 0 | +-------------+----------------+---------+-----------+----+-----------+ Success: reinitialize for member postgresql2 Waiting for reinitialize to complete on: postgresql2 Reinitialize is completed on: postgresql2 .. _patronictl_reload: patronictl reload ^^^^^^^^^^^^^^^^^ .. _patronictl_reload_synopsis: Synopsis """""""" .. code:: text reload CLUSTER_NAME [ MEMBER_NAME [, ... ] ] [ --group CITUS_GROUP ] [ { -r | --role } { leader | primary | standby-leader | replica | standby | any } ] [ --force ] .. 
_patronictl_reload_description: Description """"""""""" ``patronictl reload`` requests a reload of local configuration for one or more Patroni members. It also triggers ``pg_ctl reload`` on the managed Postgres instance, even if nothing has changed. .. _patronictl_reload_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. ``MEMBER_NAME`` Request a reload of local configuration for the given Patroni member(s). Multiple members can be specified. If no members are specified, all of them are considered. ``--group`` Request a reload of members of the given Citus group. ``CITUS_GROUP`` is the ID of the Citus group. ``-r`` / ``--role`` Select members that have the given role. Role can be one of: - ``leader``: the leader of either a regular Patroni cluster or a standby Patroni cluster; or - ``primary``: the leader of a regular Patroni cluster; or - ``standby-leader``: the leader of a standby Patroni cluster; or - ``replica``: a replica of a Patroni cluster; or - ``standby``: same as ``replica``; or - ``any``: any role. Same as omitting this parameter. ``--force`` Flag to skip confirmation prompts when requesting a reload of the local configuration. Useful for scripts. .. _patronictl_reload_examples: Examples """""""" Request a reload of the local configuration of all members of the Patroni cluster: .. 
code:: bash $ patronictl -c postgres0.yml reload batman --force + Cluster: batman (7277694203142172922) -+-----------+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +-------------+----------------+---------+-----------+----+-----------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 5 | | | postgresql1 | 127.0.0.1:5433 | Replica | streaming | 5 | 0 | | postgresql2 | 127.0.0.1:5434 | Replica | streaming | 5 | 0 | +-------------+----------------+---------+-----------+----+-----------+ Reload request received for member postgresql0 and will be processed within 10 seconds Reload request received for member postgresql1 and will be processed within 10 seconds Reload request received for member postgresql2 and will be processed within 10 seconds .. _patronictl_remove: patronictl remove ^^^^^^^^^^^^^^^^^ .. _patronictl_remove_synopsis: Synopsis """""""" .. code:: text remove CLUSTER_NAME [ --group CITUS_GROUP ] [ { -f | --format } { pretty | tsv | json | yaml } ] .. _patronictl_remove_description: Description """"""""""" ``patronictl remove`` removes information of the cluster from the DCS. It is an interactive action. .. warning:: This operation will destroy the information of the Patroni cluster from the DCS. .. _patronictl_remove_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. ``--group`` Remove information about the Patroni cluster related with the given Citus group. ``CITUS_GROUP`` is the ID of the Citus group. ``-f`` / ``--format`` How to format the list of members in the output when prompting for confirmation. Format can be one of: - ``pretty``: prints members as a pretty table; or - ``tsv``: prints members as tabular information, with columns delimited by ``\t``; or - ``json``: prints members in JSON format; or - ``yaml``: prints members in YAML format. The default is ``pretty``. .. _patronictl_remove_examples: Examples """""""" Remove information about Patroni cluster ``batman`` from the DCS: .. 
code:: bash $ patronictl -c postgres0.yml remove batman + Cluster: batman (7277694203142172922) -+-----------+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +-------------+----------------+---------+-----------+----+-----------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 5 | | | postgresql1 | 127.0.0.1:5433 | Replica | streaming | 5 | 0 | | postgresql2 | 127.0.0.1:5434 | Replica | streaming | 5 | 0 | +-------------+----------------+---------+-----------+----+-----------+ Please confirm the cluster name to remove: batman You are about to remove all information in DCS for batman, please type: "Yes I am aware": Yes I am aware This cluster currently is healthy. Please specify the leader name to continue: postgresql0 .. _patronictl_restart: patronictl restart ^^^^^^^^^^^^^^^^^^ .. _patronictl_restart_synopsis: Synopsis """""""" .. code:: text restart CLUSTER_NAME [ MEMBER_NAME [, ...] ] [ --group CITUS_GROUP ] [ { -r | --role } { leader | primary | standby-leader | replica | standby | any } ] [ --any ] [ --pg-version PG_VERSION ] [ --pending ] [ --timeout TIMEOUT ] [ --scheduled TIMESTAMP ] [ --force ] .. _patronictl_restart_description: Description """"""""""" ``patronictl restart`` requests a restart of the Postgres instance managed by a member of the Patroni cluster. The restart can be performed immediately or scheduled for later. .. _patronictl_restart_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. ``--group`` Restart the Patroni cluster related with the given Citus group. ``CITUS_GROUP`` is the ID of the Citus group. ``-r`` / ``--role`` Choose members that have the given role. 
Role can be one of: - ``leader``: the leader of either a regular Patroni cluster or a standby Patroni cluster; or - ``primary``: the leader of a regular Patroni cluster; or - ``standby-leader``: the leader of a standby Patroni cluster; or - ``replica``: a replica of a Patroni cluster; or - ``standby``: same as ``replica``; or - ``any``: any role. Same as omitting this parameter. ``--any`` Restart a single random node among the ones which match the given filters. ``--pg-version`` Select only members whose version of the managed Postgres instance is older than the given version. ``PG_VERSION`` is the Postgres version to be compared. ``--pending`` Select only members which are flagged as ``Pending restart``. ``--timeout`` Abort the restart if it takes more than the specified timeout, and fail over to a replica if the issue is on the primary. ``TIMEOUT`` is the number of seconds to wait before aborting the restart. ``--scheduled`` Schedule a restart to occur at the given timestamp. ``TIMESTAMP`` is the timestamp when the restart should occur. Specify it in unambiguous format, preferably with time zone. You can also use the literal ``now`` for the restart to be executed immediately. ``--force`` Flag to skip confirmation prompts when requesting the restart operations. Useful for scripts. .. _patronictl_restart_examples: Examples """""""" Restart all members of the cluster immediately: .. 
code:: bash $ patronictl -c postgres0.yml restart batman --force + Cluster: batman (7277694203142172922) -+-----------+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +-------------+----------------+---------+-----------+----+-----------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 6 | | | postgresql1 | 127.0.0.1:5433 | Replica | streaming | 6 | 0 | | postgresql2 | 127.0.0.1:5434 | Replica | streaming | 6 | 0 | +-------------+----------------+---------+-----------+----+-----------+ Success: restart on member postgresql0 Success: restart on member postgresql1 Success: restart on member postgresql2 Restart a random member of the cluster immediately: .. code:: bash $ patronictl -c postgres0.yml restart batman --any --force + Cluster: batman (7277694203142172922) -+-----------+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +-------------+----------------+---------+-----------+----+-----------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 6 | | | postgresql1 | 127.0.0.1:5433 | Replica | streaming | 6 | 0 | | postgresql2 | 127.0.0.1:5434 | Replica | streaming | 6 | 0 | +-------------+----------------+---------+-----------+----+-----------+ Success: restart on member postgresql1 Schedule a restart to occur at ``2023-09-13T18:00-03:00``: .. 
code:: bash $ patronictl -c postgres0.yml restart batman --scheduled 2023-09-13T18:00-03:00 --force + Cluster: batman (7277694203142172922) -+-----------+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +-------------+----------------+---------+-----------+----+-----------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 6 | | | postgresql1 | 127.0.0.1:5433 | Replica | streaming | 6 | 0 | | postgresql2 | 127.0.0.1:5434 | Replica | streaming | 6 | 0 | +-------------+----------------+---------+-----------+----+-----------+ Success: restart scheduled on member postgresql0 Success: restart scheduled on member postgresql1 Success: restart scheduled on member postgresql2 .. _patronictl_resume: patronictl resume ^^^^^^^^^^^^^^^^^ .. _patronictl_resume_synopsis: Synopsis """""""" .. code:: text resume [ CLUSTER_NAME ] [ --group CITUS_GROUP ] [ --wait ] .. _patronictl_resume_description: Description """"""""""" ``patronictl resume`` takes the Patroni cluster out of maintenance mode and re-enables automatic failover. .. _patronictl_resume_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. If not given, ``patronictl`` will attempt to fetch that from the ``scope`` configuration, if it exists. ``--group`` Resume the given Citus group. ``CITUS_GROUP`` is the ID of the Citus group. If not given, ``patronictl`` will attempt to fetch that from the ``citus.group`` configuration, if it exists. ``--wait`` Wait until all Patroni members are unpaused before returning control to the caller. .. _patronictl_resume_examples: Examples """""""" Put the cluster out of maintenance mode: .. code:: bash $ patronictl -c postgres0.yml resume batman --wait 'resume' request sent, waiting until it is recognized by all nodes Success: cluster management is resumed .. _patronictl_show_config: patronictl show-config ^^^^^^^^^^^^^^^^^^^^^^ .. _patronictl_show_config_synopsis: Synopsis """""""" .. 
code:: text show-config [ CLUSTER_NAME ] [ --group CITUS_GROUP ] .. _patronictl_show_config_description: Description """"""""""" ``patronictl show-config`` shows the dynamic configuration of the cluster that is stored in the DCS. .. _patronictl_show_config_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. If not given, ``patronictl`` will attempt to fetch that from the ``scope`` configuration, if it exists. ``--group`` Show dynamic configuration of the given Citus group. ``CITUS_GROUP`` is the ID of the Citus group. If not given, ``patronictl`` will attempt to fetch that from the ``citus.group`` configuration, if it exists. .. _patronictl_show_config_examples: Examples """""""" Show dynamic configuration of cluster ``batman``: .. code:: bash $ patronictl -c postgres0.yml show-config batman loop_wait: 10 postgresql: parameters: max_connections: 250 pg_hba: - host replication replicator 127.0.0.1/32 md5 - host all all 0.0.0.0/0 md5 use_pg_rewind: true retry_timeout: 10 ttl: 30 .. _patronictl_switchover: patronictl switchover ^^^^^^^^^^^^^^^^^^^^^ .. _patronictl_switchover_synopsis: Synopsis """""""" .. code:: text switchover [ CLUSTER_NAME ] [ --group CITUS_GROUP ] [ { --leader | --primary } LEADER_NAME ] --candidate CANDIDATE_NAME [ --force ] .. _patronictl_switchover_description: Description """"""""""" ``patronictl switchover`` performs a switchover in the cluster. It is designed to be used when the cluster is healthy, e.g.: - There is a leader; - There are synchronous standbys available in a synchronous cluster. .. note:: If your cluster is unhealthy you might be interested in ``patronictl failover`` instead. .. _patronictl_switchover_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. If not given, ``patronictl`` will attempt to fetch that from the ``scope`` configuration, if it exists. ``--group`` Perform a switchover in the given Citus group. ``CITUS_GROUP`` is the ID of the Citus group. 
``--leader`` / ``--primary`` Indicate who is the leader to be demoted at switchover time. ``LEADER_NAME`` should match the name of the current leader in the cluster. ``--candidate`` The node to be promoted on switchover, and take the primary role. ``CANDIDATE_NAME`` is the name of the node to be promoted. ``--scheduled`` Schedule a switchover to occur at the given timestamp. ``TIMESTAMP`` is the timestamp when the switchover should occur. Specify it in unambiguous format, preferrably with time zone. You can also use the literal ``now`` for the switchover to be executed immediately. ``--force`` Flag to skip confirmation prompts when performing the switchover. Useful for scripts. .. _patronictl_switchover_examples: Examples """""""" Switch over with node ``postgresql2``: .. code:: bash $ patronictl -c postgres0.yml switchover batman --leader postgresql0 --candidate postgresql2 --force Current cluster topology + Cluster: batman (7277694203142172922) -+-----------+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +-------------+----------------+---------+-----------+----+-----------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 6 | | | postgresql1 | 127.0.0.1:5433 | Replica | streaming | 6 | 0 | | postgresql2 | 127.0.0.1:5434 | Replica | streaming | 6 | 0 | +-------------+----------------+---------+-----------+----+-----------+ 2023-09-13 14:15:23.07497 Successfully switched over to "postgresql2" + Cluster: batman (7277694203142172922) -+---------+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +-------------+----------------+---------+---------+----+-----------+ | postgresql0 | 127.0.0.1:5432 | Replica | stopped | | unknown | | postgresql1 | 127.0.0.1:5433 | Replica | running | 6 | 0 | | postgresql2 | 127.0.0.1:5434 | Leader | running | 6 | | +-------------+----------------+---------+---------+----+-----------+ Schedule a switchover between ``postgresql0`` and ``postgresql2`` to occur at ``2023-09-13T18:00:00-03:00``: .. 
code:: bash $ patronictl -c postgres0.yml switchover batman --leader postgresql0 --candidate postgresql2 --scheduled 2023-09-13T18:00-03:00 --force Current cluster topology + Cluster: batman (7277694203142172922) -+-----------+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +-------------+----------------+---------+-----------+----+-----------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 8 | | | postgresql1 | 127.0.0.1:5433 | Replica | streaming | 8 | 0 | | postgresql2 | 127.0.0.1:5434 | Replica | streaming | 8 | 0 | +-------------+----------------+---------+-----------+----+-----------+ 2023-09-13 14:18:11.20661 Switchover scheduled + Cluster: batman (7277694203142172922) -+-----------+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +-------------+----------------+---------+-----------+----+-----------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 8 | | | postgresql1 | 127.0.0.1:5433 | Replica | streaming | 8 | 0 | | postgresql2 | 127.0.0.1:5434 | Replica | streaming | 8 | 0 | +-------------+----------------+---------+-----------+----+-----------+ Switchover scheduled at: 2023-09-13T18:00:00-03:00 from: postgresql0 to: postgresql2 .. _patronictl_topology: patronictl topology ^^^^^^^^^^^^^^^^^^^ .. _patronictl_topology_synopsis: Synopsis """""""" .. code:: text topology [ CLUSTER_NAME [, ... ] ] [ --group CITUS_GROUP ] [ { -W | { -w | --watch } TIME } ] .. _patronictl_topology_description: Description """"""""""" ``patronictl topology`` shows information about the Patroni cluster and its members with a tree view approach. The following information is included in the output: ``Cluster`` Name of the Patroni cluster. .. note:: Shown in the table header. ``System identifier`` Postgres system identifier. .. note:: Shown in the table header. ``Member`` Name of the Patroni member. .. note:: Information in this column is shown as a tree view of members in terms of replication connections. 
``Host`` Host where the member is located. ``Role`` Current role of the member. Can be one among: * ``Leader``: the current leader of a regular Patroni cluster; or * ``Standby Leader``: the current leader of a Patroni standby cluster; or * ``Sync Standby``: a synchronous standby of a Patroni cluster with synchronous mode enabled; or * ``Replica``: a regular standby of a Patroni cluster. ``State`` Current state of Postgres in the Patroni member. Some examples among the possible states: * ``running``: if Postgres is currently up and running; * ``streaming``: if a replica and Postgres is currently streaming WALs from the primary node; * ``in archive recovery``: if a replica and Postgres is currently fetching WALs from the archive; * ``stopped``: if Postgres had been shut down; * ``crashed``: if Postgres has crashed. ``TL`` Current Postgres timeline in the Patroni member. ``Lag in MB`` Amount worth of replication lag in megabytes between the Patroni member and its upstream. Besides that, the following information may be included in the output: ``Group`` Citus group ID. .. note:: Shown in the table header. Only shown if a Citus cluster. ``Pending restart`` ``*`` indicates the node needs a restart for some Postgres configuration to take effect. An empty value indicates the node does not require a restart. .. note:: Shown as a member attribute. Shown if node requires a restart. ``Scheduled restart`` Timestamp at which a restart has been scheduled for the Postgres instance managed by the Patroni member. An empty value indicates there is no scheduled restart for the member. .. note:: Shown as a member attribute. Shown if node has a scheduled restart. ``Tags`` Contains tags set for the Patroni member. An empty value indicates that either no tags have been configured, or that they have been configured with default values. .. note:: Shown as a member attribute. Shown if node has any custom tags, or any default tags with non-default values. 
``Scheduled switchover`` Timestamp at which a switchover has been scheduled for the Patroni cluster, if any. .. note:: Shown in the table footer. Only shown if there is a scheduled switchover. ``Maintenance mode`` If the cluster monitoring is currently paused. .. note:: Shown in the table footer. Only shown if the cluster is paused. .. _patronictl_topology_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. If not given, ``patronictl`` will attempt to fetch that from the ``scope`` configuration, if it exists. ``--group`` Show information about members from the given Citus group. ``CITUS_GROUP`` is the ID of the Citus group. ``-W`` Automatically refresh information every 2 seconds. ``-w`` / ``--watch`` Automatically refresh information at the specified interval. ``TIME`` is the interval between refreshes, in seconds. .. _patronictl_topology_examples: Examples """""""" Show topology of the cluster ``batman`` -- ``postgresql1`` and ``postgresql2`` are replicating from ``postgresql0``: .. code:: bash $ patronictl -c postgres0.yml topology batman + Cluster: batman (7277694203142172922) ---+-----------+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +---------------+----------------+---------+-----------+----+-----------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 8 | | | + postgresql1 | 127.0.0.1:5433 | Replica | streaming | 8 | 0 | | + postgresql2 | 127.0.0.1:5434 | Replica | streaming | 8 | 0 | +---------------+----------------+---------+-----------+----+-----------+ .. _patronictl_version: patronictl version ^^^^^^^^^^^^^^^^^^ .. _patronictl_version_synopsis: Synopsis """""""" .. code:: text version [ CLUSTER_NAME [, ... ] ] [ MEMBER_NAME [, ... ] ] [ --group CITUS_GROUP ] .. _patronictl_version_description: Description """"""""""" ``patronictl version`` gets the version of ``patronictl`` application. Besides that it may also include version information about Patroni clusters and their members. .. 
_patronictl_version_parameters: Parameters """""""""" ``CLUSTER_NAME`` Name of the Patroni cluster. ``MEMBER_NAME`` Name of the member of the Patroni cluster. ``--group`` Consider a Patroni cluster with the given Citus group. ``CITUS_GROUP`` is the ID of the Citus group. .. _patronictl_version_examples: Examples """""""" Get version of ``patronictl`` only: .. code:: bash $ patronictl -c postgres0.yml version patronictl version 3.1.0 Get version of ``patronictl`` and of all members of cluster ``batman``: .. code:: bash $ patronictl -c postgres0.yml version batman patronictl version 3.1.0 postgresql0: Patroni 3.1.0 PostgreSQL 15.2 postgresql1: Patroni 3.1.0 PostgreSQL 15.2 postgresql2: Patroni 3.1.0 PostgreSQL 15.2 Get version of ``patronictl`` and of members ``postgresql1`` and ``postgresql2`` of cluster ``batman``: .. code:: bash $ patronictl -c postgres0.yml version batman postgresql1 postgresql2 patronictl version 3.1.0 postgresql1: Patroni 3.1.0 PostgreSQL 15.2 postgresql2: Patroni 3.1.0 PostgreSQL 15.2 patroni-3.2.2/docs/pause.rst000066400000000000000000000054001455170150700157750ustar00rootroot00000000000000.. _pause: Pause/Resume mode for the cluster ================================= The goal -------- Under certain circumstances Patroni needs to temporarily step down from managing the cluster, while still retaining the cluster state in DCS. Possible use cases are uncommon activities on the cluster, such as major version upgrades or corruption recovery. During those activities nodes are often started and stopped for reasons unknown to Patroni, some nodes can be even temporarily promoted, violating the assumption of running only one primary. Therefore, Patroni needs to be able to "detach" from the running cluster, implementing an equivalent of the maintenance mode in Pacemaker. 
The implementation ------------------ When Patroni runs in a paused mode, it does not change the state of PostgreSQL, except for the following cases: - For each node, the member key in DCS is updated with the current information about the cluster. This causes Patroni to run read-only queries on a member node if the member is running. - For the Postgres primary with the leader lock Patroni updates the lock. If the node with the leader lock stops being the primary (i.e. is demoted manually), Patroni will release the lock instead of promoting the node back. - Manual unscheduled restart, manual unscheduled failover/switchover and reinitialize are allowed. No scheduled action is allowed. Manual switchover is only allowed if the node to switch over to is specified. - If 'parallel' primaries are detected by Patroni, it emits a warning, but does not demote the primary without the leader lock. - If there is no leader lock in the cluster, the running primary acquires the lock. If there is more than one primary node, then the first primary to acquire the lock wins. If there is no primary at all, Patroni does not try to promote any replicas. There is an exception in this rule: if there is no leader lock because the old primary has demoted itself due to the manual promotion, then only the candidate node mentioned in the promotion request may take the leader lock. When the new leader lock is granted (i.e. after promoting a replica manually), Patroni makes sure the replicas that were streaming from the previous leader will switch to the new one. - When Postgres is stopped, Patroni does not try to start it. When Patroni is stopped, it does not try to stop the Postgres instance it is managing. - Patroni will not try to remove replication slots that don't represent the other cluster member or are not listed in the configuration of the permanent slots. User guide ---------- ``patronictl`` supports :ref:`pause ` and :ref:`resume ` commands. 
One can also issue a ``PATCH`` request to the ``{namespace}/{cluster}/config`` key with ``{"pause": true/false/null}`` patroni-3.2.2/docs/releases.rst000066400000000000000000005131101455170150700164650ustar00rootroot00000000000000.. _releases: Release notes ============= Version 3.2.2 ------------- **Bugfixes** - Don't let replica restore initialize key when DCS was wiped (Alexander Kukushkin) It was happening in the method where Patroni was supposed to take over a standalone PG cluster. - Use consistent read when fetching just updated sync key from Consul (Alexander Kukushkin) Consul doesn't provide any interface to immediately get ``ModifyIndex`` for the key that we just updated, therefore we have to perform an explicit read operation. Since stale reads are allowed by default, we sometimes used to get an outdated version of the key. - Reload Postgres config if a parameter that requires restart was reset to the original value (Polina Bungina) Previously Patroni wasn't updating the config, but only resetting the ``pending_restart``. - Fix erroneous inverted logic of the confirmation prompt message when doing a failover to an async candidate in synchronous mode (Polina Bungina) The problem existed only in ``patronictl``. - Exclude leader from failover candidates in ``patronictl`` (Polina Bungina) If the cluster is healthy, failing over to an existing leader is no-op. - Create Citus database and extension idempotently (Alexander Kukushkin, Zhao Junwang) It will allow to create them in the ``post_bootstrap`` script in case there is a need to add some more dependencies to the Citus database. - Don't filter out contradictory ``nofailover`` tag (Polina Bungina) The configuration ``{nofailover: false, failover_priority: 0}`` set on a node didn't allow it to participate in the race, while it should, because ``nofailover`` tag should take precedence. 
- Fixed PyInstaller frozen issue (Sophia Ruan) The ``freeze_support()`` was called after ``argparse`` and as a result, Patroni wasn't able to start Postgres. - Fixed bug in the config generator for ``patronictl`` and ``Citus`` configuration (Israel Barth Rubio) It prevented ``patronictl`` and ``Citus`` configuration parameters set via environment variables from being written into the generated config. - Restore recovery GUCs and some Patroni-managed parameters when joining a running standby (Alexander Kukushkin) Patroni was failing to restart Postgres v12 onwards with an error about missing ``port`` in one of the internal structures. - Fixes around ``pending_restart`` flag (Polina Bungina) Don't expose ``pending_restart`` when in custom bootstrap with ``recovery_target_action = promote`` or when someone changed ``hot_standby`` or ``wal_log_hints`` using for example ``ALTER SYSTEM``. Version 3.2.1 ------------- **Bugfixes** - Limit accepted values for ``--format`` argument in ``patronictl`` (Alexander Kukushkin) It used to accept any arbitrary string and produce no output if the value wasn't recognized. - Verify that replica nodes received checkpoint LSN on shutdown before releasing the leader key (Alexander Kukushkin) Previously in some cases, we were using LSN of the SWITCH record that is followed by CHECKPOINT (if archiving mode is enabled). As a result the former primary sometimes had to do ``pg_rewind``, but there would be no data loss involved. - Do a real HTTP request when performing node name uniqueness check (Alexander Kukushkin) When running Patroni in containers it is possible that the traffic is routed using ``docker-proxy``, which listens on the port and accepts incoming connections. It was causing false positives. - Fixed Citus support with Etcd v2 (Alexander Kukushkin) Patroni was failing to deploy a new Citus cluster with Etcd v2. 
- Fixed ``pg_rewind`` behavior with Postgres v16+ (Alexander Kukushkin) The error message format of ``pg_waldump`` changed in v16 which caused ``pg_rewind`` to be called by Patroni even when it was not necessary. - Fixed bug with custom bootstrap (Alexander Kukushkin) Patroni was falsely applying ``--command`` argument, which is a bootstrap command itself. - Fixed the issue with REST API health check endpoints (Sophia Ruan) There were chances that after Postgres restart it could return ``unknown`` state for Postgres because connections were not properly closed. - Cache ``postgres --describe-config`` output results (Waynerv) They are used to figure out which GUCs are available to validate PostgreSQL configuration and we don't expect this list to change while Patroni is running. Version 3.2.0 ------------- **Deprecation notice** - The ``bootstrap.users`` support will be removed in version 4.0.0. If you need to create users after deploying a new cluster please use the ``bootstrap.post_bootstrap`` hook for that. **Breaking changes** - Enforce ``loop_wait + 2*retry_timeout <= ttl`` rule and hard-code minimal possible values (Alexander Kukushkin) Minimal values: ``loop_wait=2``, ``retry_timeout=3``, ``ttl=20``. In case values are smaller or violate the rule they are adjusted and a warning is written to Patroni logs. **New features** - Failover priority (Mark Pekala) With the help of ``tags.failover_priority`` it's now possible to make a node more preferred during the leader race. More details in the documentation (ref tags). - Implemented ``patroni --generate-config [--dsn DSN]`` and ``patroni --generate-sample-config`` (Polina Bungina) It allows to generate a config file for the running PostgreSQL cluster or a sample config file for the new Patroni cluster. - Use a dedicated connection to Postgres for Patroni REST API (Alexander Kukushkin) It helps to avoid blocking the main heartbeat loop if the system is under stress. 
- Enrich some endpoints with the ``name`` of the node (sskserk) For the monitoring endpoint ``name`` is added next to the ``scope`` and for metrics endpoint the ``name`` is added to tags. - Ensure strict failover/switchover difference (Polina Bungina) Be more precise in log messages and allow failing over to an asynchronous node in a healthy synchronous cluster. - Make permanent physical replication slots behave similarly to permanent logical slots (Alexander Kukushkin) Create permanent physical replication slots on all nodes that are allowed to become the leader and use ``pg_replication_slot_advance()`` function to advance ``restart_lsn`` for slots on standby nodes. - Add capability of specifying namespace through ``--dcs`` argument in ``patronictl`` (Israel Barth Rubio) It could be handy if ``patronictl`` is used without a configuration file. - Add support for additional parameters in custom bootstrap configuration (Israel Barth Rubio) Previously it was only possible to add custom arguments to the ``command`` and now one could list them as a mapping. **Improvements** - Set ``citus.local_hostname`` GUC to the same value which is used by Patroni to connect to the Postgres (Alexander Kukushkin) There are cases when Citus wants to have a connection to the local Postgres. By default it uses ``localhost``, which is not always available. **Bugfixes** - Ignore ``synchronous_mode`` setting in a standby cluster (Polina Bungina) Postgres doesn't support cascading synchronous replication and not ignoring ``synchronous_mode`` was breaking a switchover in a standby cluster. - Handle SIGCHLD for ``on_reload`` callback (Alexander Kukushkin) Not doing so results in a zombie process, which is reaped only when the next ``on_reload`` is executed. - Handle ``AuthOldRevision`` error when working with Etcd v3 (Alexander Kukushkin, Kenny Do) The error is raised if Etcd is configured to use JWT and when the user database in Etcd is updated. 
Version 3.1.2 ------------- **Bugfixes** - Fixed bug with ``wal_keep_size`` checks (Alexander Kukushkin) The ``wal_keep_size`` is a GUC that normally has a unit and Patroni was failing to cast its value to ``int``. As a result the value of ``bootstrap.dcs`` was not written to the ``/config`` key afterwards. - Detect and resolve inconsistencies between ``/sync`` key and ``synchronous_standby_names`` (Alexander Kukushkin) Normally, Patroni updates ``/sync`` and ``synchronous_standby_names`` in a very specific order, but in case of a bug or when someone manually reset ``synchronous_standby_names``, Patroni was getting into an inconsistent state. As a result it was possible that the failover happens to an asynchronous node. - Read GUC's values when joining running Postgres (Alexander Kukushkin) When restarted in ``pause``, Patroni was discarding the ``synchronous_standby_names`` GUC from the ``postgresql.conf``. To solve it and avoid similar issues, Patroni will read GUC's value if it is joining an already running Postgres. - Silenced annoying warnings when checking for node uniqueness (Alexander Kukushkin) ``WARNING`` messages are produced by ``urllib3`` if Patroni is quickly restarted. Version 3.1.1 ------------- **Bugfixes** - Reset failsafe state on promote (ChenChangAo) If switchover/failover happened shortly after failsafe mode had been activated, the newly promoted primary was demoting itself after failsafe becomes inactive. - Silence useless warnings in ``patronictl`` (Alexander Kukushkin) If ``patronictl`` uses the same patroni.yaml file as Patroni and can access ``PGDATA`` directory it might have been showing annoying warnings about incorrect values in the global configuration. - Explicitly enable synchronous mode for a corner case (Alexander Kukushkin) Synchronous mode effectively was never activated if there are no replicas streaming from the primary. 
- Fixed bug with ``0`` integer values validation (Israel Barth Rubio) In most cases, it didn't cause any issues, just warnings. - Don't return logical slots for standby cluster (Alexander Kukushkin) Patroni can't create logical replication slots in the standby cluster, thus they should be ignored if they are defined in the global configuration. - Avoid showing docstring in ``patronictl --help`` output (Israel Barth Rubio) The ``click`` module needs to get a special hint for that. - Fixed bug with ``kubernetes.standby_leader_label_value`` (Alexander Kukushkin) This feature effectively never worked. - Returned cluster system identifier to the ``patronictl list`` output (Polina Bungina) The problem was introduced while implementing the support for Citus, where we need to hide the identifier because it is different for coordinator and all workers. - Override ``write_leader_optime`` method in Kubernetes implementation (Alexander Kukushkin) The method is supposed to write shutdown LSN to the leader Endpoint/ConfigMap when there are no healthy replicas available to become the new primary. - Don't start stopped postgres in pause (Alexander Kukushkin) Due to a race condition, Patroni was falsely assuming that the standby should be restarted because some recovery parameters (``primary_conninfo`` or similar) were changed. - Fixed bug in ``patronictl query`` command (Israel Barth Rubio) It didn't work when only ``-m`` argument was provided or when none of ``-r`` or ``-m`` were provided. - Properly treat integer parameters that are used in the command line to start postgres (Polina Bungina) If values are supplied as strings and not casted to integer it was resulting in an incorrect calculation of ``max_prepared_transactions`` based on ``max_connections`` for Citus clusters. 
- Don't rely on ``pg_stat_wal_receiver`` when deciding on ``pg_rewind`` (Alexander Kukushkin) It could happen that ``received_tli`` reported by ``pg_stat_wal_recevier`` is ahead of the actual replayed timeline, while the timeline reported by ``DENTIFY_SYSTEM`` via replication connection is always correct. Version 3.1.0 ------------- **Breaking changes** - Changed semantic of ``restapi.keyfile`` and ``restapi.certfile`` (Alexander Kukushkin) Previously Patroni was using ``restapi.keyfile`` and ``restapi.certfile`` as client certificates as a fallback if there were no respective configuration parameters in the ``ctl`` section. .. warning:: If you enabled client certificates validation (``restapi.verify_client`` is set to ``required``), you also **must** provide **valid client certificates** in the ``ctl.certfile``, ``ctl.keyfile``, ``ctl.keyfile_password``. If not provided, Patroni will not work correctly. **New features** - Make Pod role label configurable (Waynerv) Values could be customized using ``kubernetes.leader_label_value``, ``kubernetes.follower_label_value`` and ``kubernetes.standby_leader_label_value`` parameters. This feature will be very useful when we change the ``master`` role to the ``primary``. You can read more about the feature and migration steps :ref:`here `. **Improvements** - Various improvements of ``patroni --validate-config`` (Alexander Kukushkin) Improved parameter validation for different DCS, ``bootstrap.dcs`` , ``ctl``, ``restapi``, and ``watchdog`` sections. - Start Postgres not in recovery if it crashed during recovery while Patroni is running (Alexander Kukushkin) It may reduce recovery time and will help to prevent unnecessary timeline increments. - Avoid unnecessary updates of ``/status`` key (Alexander Kukushkin) When there are no permanent logical slots Patroni was updating the ``/status`` on every heartbeat loop even when LSN on the primary didn't move forward. 
- Don't allow stale primary to win the leader race (Alexander Kukushkin) If Patroni was hanging during a significant time due to lack of resources it will additionally check that no other nodes promoted Postgres before acquiring the leader lock. - Implemented visibility of certain PostgreSQL parameters validation (Alexander Kukushkin, Feike Steenbergen) If validation of ``max_connections``, ``max_wal_senders``, ``max_prepared_transactions``, ``max_locks_per_transaction``, ``max_replication_slots``, or ``max_worker_processes`` failed Patroni was using some sane default value. Now in addition to that it will also show a warning. - Set permissions for files and directories created in ``PGDATA`` (Alexander Kukushkin) All files created by Patroni had only owner read/write permissions. This behaviour was breaking backup tools that run under a different user and relying on group read permissions. Now Patroni honors permissions on ``PGDATA`` and correctly sets permissions on all directories and files it creates inside ``PGDATA``. **Bugfixes** - Run ``archive_command`` through shell (Waynerv) Patroni might archive some WAL segments before doing crash recovery in a single-user mode or before ``pg_rewind``. If the archive_command contains some shell operators, like ``&&`` it didn't work with Patroni. - Fixed "on switchover" shutdown checks (Polina Bungina) It was possible that specified candidate is still streaming and didn't received shut down checking but the leader key was removed because some other nodes were healthy. - Fixed "is primary" check (Alexander Kukushkin) During the leader race replicas were not able to recognize that Postgres on the old leader is still running as a primary. - Fixed ``patronictl list`` (Alexander Kukushkin) The Cluster name field was missing in ``tsv``, ``json``, and ``yaml`` output formats. 
- Fixed ``pg_rewind`` behaviour after pause (Alexander Kukushkin) Under certain conditions, Patroni wasn't able to join the false primary back to the cluster with ``pg_rewind`` after coming out of maintenance mode. - Fixed bug in Etcd v3 implementation (Alexander Kukushkin) Invalidate internal KV cache if key update performed using ``create_revision``/``mod_revision`` field due to revision mismatch. - Fixed behaviour of replicas in standby cluster in pause (Alexander Kukushkin) When the leader key expires replicas in standby cluster will not follow the remote node but keep ``primary_conninfo`` as it is. Version 3.0.4 ------------- **New features** - Make the replication status of standby nodes visible (Alexander Kukushkin) For PostgreSQL 9.6+ Patroni will report the replication state as ``streaming`` when the standby is streaming from the other node or ``in archive recovery`` when there is no replication connection and ``restore_command`` is set. The state is visible in ``member`` keys in DCS, in the REST API, and in ``patronictl list`` output. **Improvements** - Improved error messages with Etcd v3 (Alexander Kukushkin) When Etcd v3 cluster isn't accessible Patroni was reporting that it can't access ``/v2`` endpoints. - Use quorum read in ``patronictl`` if it is possible (Alexander Kukushkin) Etcd or Consul clusters could be degraded to read-only, but from the ``patronictl`` view everything was fine. Now it will fail with the error. - Prevent splitbrain from duplicate names in configuration (Mark Pekala) When starting Patroni will check if node with the same name is registered in DCS, and try to query its REST API. If REST API is accessible Patroni exits with an error. It will help to protect from the human error. - Start Postgres not in recovery if it crashed while Patroni is running (Alexander Kukushkin) It may reduce recovery time and will help from unnecessary timeline increments. 
**Bugfixes** - REST API SSL certificate were not reloaded upon receiving a SIGHUP (Israel Barth Rubio) Regression was introduced in 3.0.3. - Fixed integer GUCs validation for parameters like ``max_connections`` (Feike Steenbergen) Patroni didn't like quoted numeric values. Regression was introduced in 3.0.3. - Fix issue with ``synchronous_mode`` (Alexander Kukushkin) Execute ``txid_current()`` with ``synchronous_commit=off`` so it doesn't accidentally wait for absent synchronous standbys when ``synchronous_mode_strict`` is enabled. Version 3.0.3 ------------- **New features** - Compatibility with PostgreSQL 16 beta1 (Alexander Kukushkin) Extended GUC's validator rules. - Make PostgreSQL GUC's validator extensible (Israel Barth Rubio) Validator rules are loaded from YAML files located in ``patroni/postgresql/available_parameters/`` directory. Files are ordered in alphabetical order and applied one after another. It makes possible to have custom validators for non-standard Postgres distributions. - Added ``restapi.request_queue_size`` option (Andrey Zhidenkov, Aleksei Sukhov) Sets request queue size for TCP socket used by Patroni REST API. Once the queue is full, further requests get a "Connection denied" error. The default value is 5. - Call ``initdb`` directly when initializing a new cluster (Matt Baker) Previously it was called via ``pg_ctl``, what required a special quoting of parameters passed to ``initdb``. - Added before stop hook (Le Duane) The hook could be configured via ``postgresql.before_stop`` and is executed right before ``pg_ctl stop``. The exit code doesn't impact shutdown process. - Added support for custom Postgres binary names (Israel Barth Rubio, Polina Bungina) When using a custom Postgres distribution it may be the case that the Postgres binaries are compiled with different names other than the ones used by the community Postgres distribution. 
Custom binary names could be configured using ``postgresql.bin_name.*`` and ``PATRONI_POSTGRESQL_BIN_*`` environment variables. **Improvements** - Various improvements of ``patroni --validate-config`` (Polina Bungina) - Make ``bootstrap.initdb`` optional. It is only required for new clusters, but ``patroni --validate-config`` was complaining if it was missing in the config. - Don't error out when ``postgresql.bin_dir`` is empty or not set. Try to first find Postgres binaries in the default PATH instead. - Make ``postgresql.authentication.rewind`` section optional. If it is missing, Patroni is using the superuser. - Improved error reporting in ``patronictl`` (Israel Barth Rubio) The ``\n`` symbol was rendered as it is, instead of the actual newline symbol. **Bugfixes** - Fixed issue in Citus support (Alexander Kukushkin) If the REST API call from the promoted worker to the coordinator failed during switchover it was leaving the given Citus group blocked during indefinite time. - Allow `etcd3` URL in `--dcs-url` option of `patronictl` (Israel Barth Rubio) If users attempted to pass a `etcd3` URL through `--dcs-url` option of `patronictl` they would face an exception. Version 3.0.2 ------------- .. warning:: Version 3.0.2 dropped support of Python older than 3.6. **New features** - Added sync standby replica status to ``/metrics`` endpoint (Thomas von Dein, Alexander Kukushkin) Before were only reporting ``primary``/``standby_leader``/``replica``. - User-friendly handling of ``PAGER`` in ``patronictl`` (Israel Barth Rubio) It makes pager configurable via ``PAGER`` environment variable, which overrides default ``less`` and ``more``. - Make K8s retriable HTTP status code configurable (Alexander Kukushkin) On some managed platforms it is possible to get status code ``401 Unauthorized``, which sometimes gets resolved after a few retries. 
**Improvements** - Set ``hot_standby`` to ``off`` during custom bootstrap only if ``recovery_target_action`` is set to ``promote`` (Alexander Kukushkin) It was necessary to make ``recovery_target_action=pause`` work correctly. - Don't allow ``on_reload`` callback to kill other callbacks (Alexander Kukushkin) ``on_start``/``on_stop``/``on_role_change`` are usually used to add/remove Virtual IP and ``on_reload`` should not interfere with them. - Switched to ``IMDSFetcher`` in aws callback example script (Polina Bungina) The ``IMDSv2`` requires a token to work with and the ``IMDSFetcher`` handles it transparently. **Bugfixes** - Fixed ``patronictl switchover`` on Citus cluster running on Kubernetes (Lukáš Lalinský) It didn't work for namespaces different from ``default``. - Don't write to ``PGDATA`` if major version is not known (Alexander Kukushkin) If right after the start ``PGDATA`` was empty (maybe wasn't yet mounted), Patroni was making a false assumption about PostgreSQL version and falsely creating ``recovery.conf`` file even if the actual major version is v10+. - Fixed bug with Citus metadata after coordinator failover (Alexander Kukushkin) The ``citus_set_coordinator_host()`` call doesn't cause metadata sync and the change was invisible on worker nodes. The issue is solved by switching to ``citus_update_node()``. - Use etcd hosts listed in the config file as a fallback when all etcd nodes "failed" (Alexander Kukushkin) The etcd cluster may change topology over time and Patroni tries to follow it. If at some point all nodes became unreachable Patroni will use a combination of nodes from the config plus the last known topology when trying to reconnect. Version 3.0.1 ------------- **Bugfixes** - Pass proper role name to an ``on_role_change`` callback script'. (Alexander Kukushkin, Polina Bungina) Patroni used to erroneously pass ``promoted`` role to an ``on_role_change`` callback script on promotion. The passed role name changed back to ``master``. 
This regression was introduced in 3.0.0. Version 3.0.0 ------------- This version adds integration with `Citus `__ and makes it possible to survive temporary DCS outages without demoting primary. .. warning:: - Version 3.0.0 is the last release supporting Python 2.7. Upcoming release will drop support of Python versions older than 3.7. - The RAFT support is deprecated. We will do our best to maintain it, but take neither guarantee nor responsibility for possible issues. - This version is the first step in getting rid of the "master", in favor of "primary". Upgrading to the next major release will work reliably only if you run at least 3.0.0. **New features** - DCS failsafe mode (Alexander Kukushkin, Polina Bungina) If the feature is enabled it will allow Patroni cluster to survive temporary DCS outages. You can find more details in the :ref:`documentation `. - Citus support (Alexander Kukushkin, Polina Bungina, Jelte Fennema) Patroni enables easy deployment and management of `Citus `__ clusters with HA. Please check :ref:`here ` page for more information. **Improvements** - Suppress recurring errors when dropping unknown but active replication slots (Michael Banck) Patroni will still write these logs, but only in DEBUG. - Run only one monitoring query per HA loop (Alexander Kukushkin) It wasn't the case if synchronous replication is enabled. - Keep only latest failed data directory (William Albertus Dembo) If bootstrap failed Patroni used to rename $PGDATA folder with timestamp suffix. From now on the suffix will be ``.failed`` and if such folder exists it is removed before renaming. - Improved check of synchronous replication connections (Alexander Kukushkin) When the new host is added to the ``synchronous_standby_names`` it will be set as synchronous in DCS only when it managed to catch up with the primary in addition to ``pg_stat_replication.sync_state = 'sync'``. 
**Removed functionality** - Remove ``patronictl scaffold`` (Alexander Kukushkin) The only reason for having it was a hacky way of running standby clusters. Version 2.1.7 ------------- **Bugfixes** - Fixed little incompatibilities with legacy python modules (Alexander Kukushkin) They prevented from building/running Patroni on Debian buster/Ubuntu bionic. Version 2.1.6 ------------- **Improvements** - Fix annoying exceptions on ssl socket shutdown (Alexander Kukushkin) The HAProxy is closing connections as soon as it got the HTTP Status code leaving no time for Patroni to properly shutdown SSL connection. - Adjust example Dockerfile for arm64 (Polina Bungina) Remove explicit ``amd64`` and ``x86_64``, don't remove ``libnss_files.so.*``. **Security improvements** - Enforce ``search_path=pg_catalog`` for non-replication connections (Alexander Kukushkin) Since Patroni is heavily relying on superuser connections, we want to protect it from the possible attacks carried out using user-defined functions and/or operators in ``public`` schema with the same name and signature as the corresponding objects in ``pg_catalog``. For that, ``search_path=pg_catalog`` is enforced for all connections created by Patroni (except replication connections). - Prevent passwords from being recorded in ``pg_stat_statements`` (Feike Steenbergen) It is achieved by setting ``pg_stat_statements.track_utility=off`` when creating users. **Bugfixes** - Declare ``proxy_address`` as optional (Denis Laxalde) As it is effectively a non-required option. - Improve behaviour of the insecure option (Alexander Kukushkin) Ctl's ``insecure`` option didn't work properly when client certificates were used for REST API requests. - Take watchdog configuration from ``bootstrap.dcs`` when the new cluster is bootstrapped (Matt Baker) Patroni used to initially configure watchdog with defaults when bootstrapping a new cluster rather than taking configuration used to bootstrap the DCS. 
- Fix the way file extensions are treated while finding executables in WIN32 (Martín Marqués) Only add ``.exe`` to a file name if it has no extension yet. - Fix Consul TTL setup (Alexander Kukushkin) We used ``ttl/2.0`` when setting the value on the HTTPClient, but forgot to multiply the current value by 2 in the class' property. It was resulting in Consul TTL off by twice. **Removed functionality** - Remove ``patronictl configure`` (Polina Bungina) There is no more need for a separate ``patronictl`` config creation. Version 2.1.5 ------------- This version enhances compatibility with PostgreSQL 15 and declares Etcd v3 support as production ready. The Patroni on Raft remains in Beta. **New features** - Improve ``patroni --validate-config`` (Denis Laxalde) Exit with code 1 if config is invalid and print errors to stderr. - Don't drop replication slots in pause (Alexander Kukushkin) Patroni is automatically creating/removing physical replication slots when members are joining/leaving the cluster. In pause slots will no longer be removed. - Support the ``HEAD`` request method for monitoring endpoints (Robert Cutajar) If used instead of ``GET`` Patroni will return only the HTTP Status Code. - Support behave tests on Windows (Alexander Kukushkin) Emulate graceful Patroni shutdown (``SIGTERM``) on Windows by introduce the new REST API endpoint ``POST /sigterm``. - Introduce ``postgresql.proxy_address`` (Alexander Kukushkin) It will be written to the member key in DCS as the ``proxy_url`` and could be used/useful for service discovery. **Stability improvements** - Call ``pg_replication_slot_advance()`` from a thread (Alexander Kukushkin) On busy clusters with many logical replication slots the ``pg_replication_slot_advance()`` call was affecting the main HA loop and could result in the member key expiration. 
- Archive possibly missing WALs before calling ``pg_rewind`` on the old primary (Polina Bungina) If the primary crashed and was down during considerable time, some WAL files could be missing from archive and from the new primary. There is a chance that ``pg_rewind`` could remove these WAL files from the old primary making it impossible to start it as a standby. By archiving ``ready`` WAL files we not only mitigate this problem but in general improving continues archiving experience. - Ignore ``403`` errors when trying to create Kubernetes Service (Nick Hudson, Polina Bungina) Patroni was spamming logs by unsuccessful attempts to create the service, which in fact could already exist. - Improve liveness probe (Alexander Kukushkin) The liveness problem will start failing if the heartbeat loop is running longer than `ttl` on the primary or `2*ttl` on the replica. That will allow us to use it as an alternative for :ref:`watchdog ` on Kubernetes. - Make sure only sync node tries to grab the lock when switchover (Alexander Kukushkin, Polina Bungina) Previously there was a slim chance that up-to-date async member could become the leader if the manual switchover was performed without specifying the target. - Avoid cloning while bootstrap is running (Ants Aasma) Do not allow a create replica method that does not require a leader to be triggered while the cluster bootstrap is running. - Compatibility with kazoo-2.9.0 (Alexander Kukushkin) Depending on python version the ``SequentialThreadingHandler.select()`` method may raise ``TypeError`` and ``IOError`` exceptions if ``select()`` is called on the closed socket. - Explicitly shut down SSL connection before socket shutdown (Alexander Kukushkin) Not doing it resulted in ``unexpected eof while reading`` errors with OpenSSL 3.0. - Compatibility with `prettytable>=2.2.0` (Alexander Kukushkin) Due to the internal API changes the cluster name header was shown on the incorrect line. 
**Bugfixes** - Handle expired token for Etcd lease_grant (monsterxx03) In case of error get the new token and retry request. - Fix bug in the ``GET /read-only-sync`` endpoint (Alexander Kukushkin) It was introduced in previous release and effectively never worked. - Handle the case when data dir storage disappeared (Alexander Kukushkin) Patroni is periodically checking that the PGDATA is there and not empty, but in case of issues with storage the ``os.listdir()`` is raising the ``OSError`` exception, breaking the heart-beat loop. - Apply ``master_stop_timeout`` when waiting for user backends to close (Alexander Kukushkin) Something that looks like user backend could be in fact a background worker (e.g., Citus Maintenance Daemon) that is failing to stop. - Accept ``*:`` for ``postgresql.listen`` (Denis Laxalde) The ``patroni --validate-config`` was complaining about it being invalid. - Timeouts fixes in Raft (Alexander Kukushkin) When Patroni or patronictl are starting they try to get Raft cluster topology from known members. These calls were made without proper timeouts. - Forcefully update consul service if token was changed (John A. Lotoski) Not doing so results in errors "rpc error making call: rpc error making call: ACL not found". Version 2.1.4 ------------- **New features** - Improve ``pg_rewind`` behavior on typical Debian/Ubuntu systems (Gunnar "Nick" Bluth) On Postgres setups that keep `postgresql.conf` outside of the data directory (e.g. Ubuntu/Debian packages), ``pg_rewind --restore-target-wal`` fails to figure out the value of the ``restore_command``. - Allow setting ``TLSServerName`` on Consul service checks (Michael Gmelin) Useful when checks are performed by IP and the Consul ``node_name`` is not a FQDN. - Added ``ppc64le`` support in watchdog (Jean-Michel Scheiwiler) And fixed watchdog support on some non-x86 platforms. 
- Switched aws.py callback from ``boto`` to ``boto3`` (Alexander Kukushkin) ``boto`` 2.x is abandoned since 2018 and fails with python 3.9. - Periodically refresh service account token on K8s (Haitao Li) Since Kubernetes v1.21 service account tokens expire in 1 hour. - Added ``/read-only-sync`` monitoring endpoint (Dennis4b) It is similar to the ``/read-only`` but includes only synchronous replicas. **Stability improvements** - Don't copy the logical replication slot to a replica if there is a configuration mismatch in the logical decoding setup with the primary (Alexander Kukushkin) A replica won't copy a logical replication slot from the primary anymore if the slot doesn't match the ``plugin`` or ``database`` configuration options. Previously, the check for whether the slot matches those configuration options was not performed until after the replica copied the slot and started with it, resulting in unnecessary and repeated restarts. - Special handling of recovery configuration parameters for PostgreSQL v12+ (Alexander Kukushkin) While starting as replica Patroni should be able to update ``postgresql.conf`` and restart/reload if the leader address has changed by caching current parameters values instead of querying them from ``pg_settings``. - Better handling of IPv6 addresses in the ``postgresql.listen`` parameters (Alexander Kukushkin) Since the ``listen`` parameter has a port, people try to put IPv6 addresses into square brackets, which were not correctly stripped when there is more than one IP in the list. - Use ``replication`` credentials when performing divergence check only on PostgreSQL v10 and older (Alexander Kukushkin) If ``rewind`` is enabled, Patroni will again use either ``superuser`` or ``rewind`` credentials on newer Postgres versions. **Bugfixes** - Fixed missing import of ``dateutil.parser`` (Wesley Mendes) Tests weren't failing only because it was also imported from other modules. 
- Ensure that ``optime`` annotation is a string (Sebastian Hasler) In certain cases Patroni was trying to pass it as numeric. - Better handling of failed ``pg_rewind`` attempt (Alexander Kukushkin) If the primary becomes unavailable during ``pg_rewind``, ``$PGDATA`` will be left in a broken state. Following that, Patroni will remove the data directory even if this is not allowed by the configuration. - Don't remove ``slots`` annotations from the leader ``ConfigMap``/``Endpoint`` when PostgreSQL isn't ready (Alexander Kukushkin) If ``slots`` value isn't passed the annotation will keep the current value. - Handle concurrency problem with K8s API watchers (Alexander Kukushkin) Under certain (unknown) conditions watchers might become stale; as a result, ``attempt_to_acquire_leader()`` method could fail due to the HTTP status code 409. In that case we reset watchers connections and restart from scratch. Version 2.1.3 ------------- **New features** - Added support for encrypted TLS keys for ``patronictl`` (Alexander Kukushkin) It could be configured via ``ctl.keyfile_password`` or the ``PATRONI_CTL_KEYFILE_PASSWORD`` environment variable. - Added more metrics to the /metrics endpoint (Alexandre Pereira) Specifically, ``patroni_pending_restart`` and ``patroni_is_paused``. - Make it possible to specify multiple hosts in the standby cluster configuration (Michael Banck) If the standby cluster is replicating from the Patroni cluster it might be nice to rely on client-side failover which is available in ``libpq`` since PostgreSQL v10. That is, the ``primary_conninfo`` on the standby leader and ``pg_rewind`` setting ``target_session_attrs=read-write`` in the connection string. The ``pgpass`` file will be generated with multiple lines (one line per host), and instead of calling ``CHECKPOINT`` on the primary cluster nodes the standby cluster will wait for ``pg_control`` to be updated. 
**Stability improvements** - Compatibility with legacy ``psycopg2`` (Alexander Kukushkin) For example, the ``psycopg2`` installed from Ubuntu 18.04 packages doesn't have the ``UndefinedFile`` exception yet. - Restart ``etcd3`` watcher if all Etcd nodes don't respond (Alexander Kukushkin) If the watcher is alive the ``get_cluster()`` method continues returning stale information even if all Etcd nodes are failing. - Don't remove the leader lock in the standby cluster while paused (Alexander Kukushkin) Previously the lock was maintained only by the node that was running as a primary and not a standby leader. **Bugfixes** - Fixed bug in the standby-leader bootstrap (Alexander Kukushkin) Patroni was considering bootstrap as failed if Postgres didn't start accepting connections after 60 seconds. The bug was introduced in the 2.1.2 release. - Fixed bug with failover to a cascading standby (Alexander Kukushkin) When figuring out which slots should be created on cascading standby we forgot to take into account that the leader might be absent. - Fixed small issues in Postgres config validator (Alexander Kukushkin) Integer parameters introduced in PostgreSQL v14 were failing to validate because min and max values were quoted in the validator.py - Use replication credentials when checking leader status (Alexander Kukushkin) It could be that the ``remove_data_directory_on_diverged_timelines`` is set, but there is no ``rewind_credentials`` defined and superuser access between nodes is not allowed. - Fixed "port in use" error on REST API certificate replacement (Ants Aasma) When switching certificates there was a race condition with a concurrent API request. If there is one active during the replacement period then the replacement will error out with a port in use error and Patroni gets stuck in a state without an active API server. 
- Fixed a bug in cluster bootstrap if passwords contain ``%`` characters (Bastien Wirtz) The bootstrap method executes the ``DO`` block, with all parameters properly quoted, but the ``cursor.execute()`` method didn't like an empty list with parameters passed. - Fixed the "AttributeError: no attribute 'leader'" exception (Hrvoje Milković) It could happen if the synchronous mode is enabled and the DCS content was wiped out. - Fix bug in divergence timeline check (Alexander Kukushkin) Patroni was falsely assuming that timelines have diverged. For pg_rewind it didn't create any problem, but if pg_rewind is not allowed and the ``remove_data_directory_on_diverged_timelines`` is set, it resulted in reinitializing the former leader. Version 2.1.2 ------------- **New features** - Compatibility with ``psycopg>=3.0`` (Alexander Kukushkin) By default ``psycopg2`` is preferred. `psycopg>=3.0` will be used only if ``psycopg2`` is not available or its version is too old. - Add ``dcs_last_seen`` field to the REST API (Michael Banck) This field notes the last time (as unix epoch) a cluster member has successfully communicated with the DCS. This is useful to identify and/or analyze network partitions. - Release the leader lock when ``pg_controldata`` reports "shut down" (Alexander Kukushkin) To solve the problem of slow switchover/shutdown in case ``archive_command`` is slow/failing, Patroni will remove the leader key immediately after ``pg_controldata`` started reporting PGDATA as ``shut down`` cleanly and it verified that there is at least one replica that received all changes. If there are no replicas that fulfill this condition the leader key is not removed and the old behavior is retained, i.e. Patroni will keep updating the lock. - Add ``sslcrldir`` connection parameter support (Kostiantyn Nemchenko) The new connection parameter was introduced in the PostgreSQL v14. 
- Allow setting ACLs for ZNodes in Zookeeper (Alwyn Davis) Introduce a new configuration option ``zookeeper.set_acls`` so that Kazoo will apply a default ACL for each ZNode that it creates. **Stability improvements** - Delay the next attempt of recovery till next HA loop (Alexander Kukushkin) If Postgres crashed due to out of disk space (for example) and fails to start because of that Patroni is too eagerly trying to recover it flooding logs. - Add log before demoting, which can take some time (Michael Banck) It can take some time for the demote to finish and it might not be obvious from looking at the logs what exactly is going on. - Improve "I am" status messages (Michael Banck) ``no action. I am a secondary ({0})`` vs ``no action. I am ({0}), a secondary`` - Cast to int ``wal_keep_segments`` when converting to ``wal_keep_size`` (Jorge Solórzano) It is possible to specify ``wal_keep_segments`` as a string in the global :ref:`dynamic configuration ` and due to Python being a dynamically typed language the string was simply multiplied. Example: ``wal_keep_segments: "100"`` was converted to ``100100100100100100100100100100100100100100100100MB``. - Allow switchover only to sync nodes when synchronous replication is enabled (Alexander Kukushkin) In addition to that do the leader race only against known synchronous nodes. - Use cached role as a fallback when Postgres is slow (Alexander Kukushkin) In some extreme cases Postgres could be so slow that the normal monitoring query does not finish in a few seconds. The ``statement_timeout`` exception not being properly handled could lead to the situation where Postgres was not demoted on time when the leader key expired or the update failed. In case of such exception Patroni will use the cached ``role`` to determine whether Postgres is running as a primary. - Avoid unnecessary updates of the member ZNode (Alexander Kukushkin) If no values have changed in the members data, the update should not happen. 
- Optimize checkpoint after promote (Alexander Kukushkin) Avoid doing ``CHECKPOINT`` if the latest timeline is already stored in ``pg_control``. It helps to avoid unnecessary ``CHECKPOINT`` right after initializing the new cluster with ``initdb``. - Prefer members without ``nofailover`` when picking sync nodes (Alexander Kukushkin) Previously sync nodes were selected only based on the replication lag, hence the node with ``nofailover`` tag had the same chances to become synchronous as any other node. That behavior was confusing and dangerous at the same time because in case of a failed primary the failover could not happen automatically. - Remove duplicate hosts from the etcd machine cache (Michael Banck) Advertised client URLs in the etcd cluster could be misconfigured. Removing duplicates in Patroni in this case is a low-hanging fruit. **Bugfixes** - Skip temporary replication slots while doing slot management (Alexander Kukushkin) Starting from v10 ``pg_basebackup`` creates a temporary replication slot for WAL streaming and Patroni was trying to drop it because the slot name looks unknown. In order to fix it, we skip all temporary slots when querying ``pg_stat_replication_slots`` view. - Ensure ``pg_replication_slot_advance()`` doesn't timeout (Alexander Kukushkin) Patroni was using the default ``statement_timeout`` in this case and once the call failed there are very high chances that it will never recover, resulting in increased size of ``pg_wal`` and ``pg_catalog`` bloat. - The ``/status`` wasn't updated on demote (Alexander Kukushkin) After demoting PostgreSQL the old leader updates the last LSN in DCS. Starting from ``2.1.0`` the new ``/status`` key was introduced, but the optime was still written to the ``/optime/leader``. - Handle DCS exceptions when demoting (Alexander Kukushkin) While demoting the master due to failure to update the leader lock it could happen that DCS goes completely down and the ``get_cluster()`` call raises an exception. 
Not being handled properly it results in Postgres remaining stopped until DCS recovers. - The ``use_unix_socket_repl`` didn't work in some cases (Alexander Kukushkin) Specifically, if ``postgresql.unix_socket_directories`` is not set. In this case Patroni is supposed to use the default value from ``libpq``. - Fix a few issues with Patroni REST API (Alexander Kukushkin) The ``clusters_unlocked`` could sometimes be undefined, which resulted in exceptions in the ``GET /metrics`` endpoint. In addition to that the error handling method was assuming that the ``connect_address`` tuple always has two elements, while in fact there could be more in case of IPv6. - Wait for newly promoted node to finish recovery before deciding to rewind (Alexander Kukushkin) It could take some time before the actual promote happens and the new timeline is created. Without waiting, replicas could come to the conclusion that rewind isn't required. - Handle missing timelines in a history file when deciding to rewind (Alexander Kukushkin) If the current replica timeline is missing in the history file on the primary the replica was falsely assuming that rewind isn't required. Version 2.1.1 ------------- **New features** - Support for ETCD SRV name suffix (David Pavlicek) Etcd allows differentiating between multiple Etcd clusters under the same domain and from now on Patroni also supports it. - Enrich history with the new leader (huiyalin525) It adds the new column to the ``patronictl history`` output. - Make the CA bundle configurable for in-cluster Kubernetes config (Aron Parsons) By default Patroni is using ``/var/run/secrets/kubernetes.io/serviceaccount/ca.crt`` and this new feature allows specifying the custom ``kubernetes.cacert``. - Support dynamically registering/deregistering as a Consul service and changing tags (Tommy Li) Previously it required a Patroni restart. 
**Bugfixes** - Avoid unnecessary reload of REST API (Alexander Kukushkin) The previous release added a feature of reloading REST API certificates if changed on disk. Unfortunately, the reload was happening unconditionally right after the start. - Don't resolve cluster members when ``etcd.use_proxies`` is set (Alexander Kukushkin) When starting up Patroni checks the healthiness of Etcd cluster by querying the list of members. In addition to that, it also tried to resolve their hostnames, which is not necessary when working with Etcd via proxy and was causing unnecessary warnings. - Skip rows with NULL values in the ``pg_stat_replication`` (Alexander Kukushkin) It seems that the ``pg_stat_replication`` view could contain NULL values in the ``replay_lsn``, ``flush_lsn``, or ``write_lsn`` fields even when ``state = 'streaming'``. Version 2.1.0 ------------- This version adds compatibility with PostgreSQL v14, makes logical replication slots survive failover/switchover, implements support of allowlist for REST API, and also reduces the number of logs to one line per heart-beat. **New features** - Compatibility with PostgreSQL v14 (Alexander Kukushkin) Unpause WAL replay if Patroni is not in a "pause" mode itself. It could be "paused" due to the change of certain parameters like for example ``max_connections`` on the primary. - Failover logical slots (Alexander Kukushkin) Make logical replication slots survive failover/switchover on PostgreSQL v11+. The replication slot is copied from the primary to the replica with restart and later the `pg_replication_slot_advance() `__ function is used to move it forward. As a result, the slot will already exist before the failover and no events should be lost, but, there is a chance that some events could be delivered more than once. - Implemented allowlist for Patroni REST API (Alexander Kukushkin) If configured, only IPs matching the rules will be allowed to call unsafe endpoints. 
In addition to that, it is possible to automatically include the IPs of cluster members in the list. - Added support of replication connections via unix socket (Mohamad El-Rifai) Previously Patroni was always using TCP for replication connections, which could cause some issues with SSL verification. Using unix sockets allows exempting the replication user from SSL verification. - Health check on user-defined tags (Arman Jafari Tehrani) Along with :ref:`predefined tags: ` it is possible to specify any number of custom tags that become visible in the ``patronictl list`` output and in the REST API. From now on it is possible to use custom tags in health checks. - Added Prometheus ``/metrics`` endpoint (Mark Mercado, Michael Banck) The endpoint exposing the same metrics as ``/patroni``. - Reduced chattiness of Patroni logs (Alexander Kukushkin) When everything goes normally, only one line will be written for every run of HA loop. **Breaking changes** - The old ``permanent logical replication slots`` feature will no longer work with PostgreSQL v10 and older (Alexander Kukushkin) The strategy of creating the logical slots after performing a promotion can't guarantee that no logical events are lost and is therefore disabled. - The ``/leader`` endpoint always returns 200 if the node holds the lock (Alexander Kukushkin) Promoting the standby cluster requires updating load-balancer health checks, which is not very convenient and easy to forget. To solve it, we change the behavior of the ``/leader`` health check endpoint. It will return 200 without taking into account whether the cluster is normal or the ``standby_cluster``. 
**Improvements in Raft support** - Reliable support of Raft traffic encryption (Alexander Kukushkin) Due to the different issues in the ``PySyncObj`` the encryption support was very unstable. - Handle DNS issues in Raft implementation (Alexander Kukushkin) If ``self_addr`` and/or ``partner_addrs`` are configured using the DNS name instead of IP's the ``PySyncObj`` was effectively resolving them only once when the object is created. It was causing problems when the same node was coming back online with a different IP. **Stability improvements** - Compatibility with ``psycopg2-2.9+`` (Alexander Kukushkin) In ``psycopg2`` the ``autocommit = True`` is ignored in the ``with connection`` block, which breaks replication protocol connections. - Fix excessive HA loop runs with Zookeeper (Alexander Kukushkin) Update of member ZNodes was causing a chain reaction and resulted in running the HA loops multiple times in a row. - Reload if REST API certificate is changed on disk (Michael Todorovic) If the REST API certificate file was updated in place Patroni didn't perform a reload. - Don't create pgpass dir if kerberos auth is used (Kostiantyn Nemchenko) Kerberos and password authentication are mutually exclusive. - Fixed little issues with custom bootstrap (Alexander Kukushkin) Start Postgres with ``hot_standby=off`` only when we do a PITR and restart it after PITR is done. **Bugfixes** - Compatibility with ``kazoo-2.7+`` (Alexander Kukushkin) Since Patroni is handling retries on its own, it is relying on the old behavior of ``kazoo`` that requests to a Zookeeper cluster are immediately discarded when there are no connections available. - Explicitly request the version of Etcd v3 cluster when it is known that we are connecting via proxy (Alexander Kukushkin) Patroni is working with Etcd v3 cluster via gRPC-gateway and, depending on the cluster version, different endpoints (``/v3``, ``/v3beta``, or ``/v3alpha``) must be used. 
The version was resolved only together with the cluster topology, but the latter was never done when connecting via proxy. Version 2.0.2 ------------- **New features** - Ability to ignore externally managed replication slots (James Coleman) Patroni is trying to remove any replication slot which is unknown to it, but there are certainly cases when replication slots should be managed externally. From now on it is possible to configure slots that should not be removed. - Added support for cipher suite limitation for REST API (Gunnar "Nick" Bluth) It could be configured via ``restapi.ciphers`` or the ``PATRONI_RESTAPI_CIPHERS`` environment variable. - Added support for encrypted TLS keys for REST API (Jonathan S. Katz) It could be configured via ``restapi.keyfile_password`` or the ``PATRONI_RESTAPI_KEYFILE_PASSWORD`` environment variable. - Constant time comparison of REST API authentication credentials (Alex Brasetvik) Use ``hmac.compare_digest()`` instead of ``==``, which is vulnerable to a timing attack. - Choose synchronous nodes based on replication lag (Krishna Sarabu) If the replication lag on the synchronous node starts exceeding the configured threshold it could be demoted to asynchronous and/or replaced by the other node. Behaviour is controlled with ``maximum_lag_on_syncnode``. **Stability improvements** - Start postgres with ``hot_standby = off`` when doing custom bootstrap (Igor Yanchenko) During custom bootstrap Patroni is restoring the basebackup, starting Postgres up, and waiting until recovery finishes. Some PostgreSQL parameters on the standby can't be smaller than on the primary and if the new value (restored from WAL) is higher than the configured one, Postgres panics and stops. In order to avoid such behavior we will do custom bootstrap without ``hot_standby`` mode. - Warn the user if the required watchdog is not healthy (Nicolas Thauvin) When the watchdog device is not writable or missing in required mode, the member cannot be promoted. 
Added a warning to show the user where to search for this misconfiguration. - Better verbosity for single-user mode recovery (Alexander Kukushkin) If Patroni notices that PostgreSQL wasn't shut down cleanly, in certain cases the crash-recovery is executed by starting Postgres in single-user mode. It could happen that the recovery failed (for example due to the lack of space on disk) but errors were swallowed. - Added compatibility with ``python-consul2`` module (Alexander Kukushkin, Wilfried Roset) The good old ``python-consul`` has not been maintained for a few years, therefore someone created a fork with new features and bug-fixes. - Don't use ``bypass_api_service`` when running ``patronictl`` (Alexander Kukushkin) When a K8s pod is running in a non-``default`` namespace it does not necessarily have enough permissions to query the ``kubernetes`` endpoint. In this case Patroni shows the warning and ignores the ``bypass_api_service`` setting. In case of ``patronictl`` the warning was a bit annoying. - Create ``raft.data_dir`` if it doesn't exist or make sure that it is writable (Mark Mercado) Improves user-friendliness and usability. **Bugfixes** - Don't interrupt restart or promote if lost leader lock in pause (Alexander Kukushkin) In pause it is allowed to run postgres as primary without lock. - Fixed issue with ``shutdown_request()`` in the REST API (Nicolas Limage) In order to improve handling of SSL connections and delay the handshake until thread is started Patroni overrides a few methods in the ``HTTPServer``. The ``shutdown_request()`` method was forgotten. - Fixed issue with sleep time when using Zookeeper (Alexander Kukushkin) There were chances that Patroni was sleeping up to twice as long between running HA code. - Fixed invalid ``os.symlink()`` calls when moving data directory after failed bootstrap (Andrew L'Ecuyer) If the bootstrap failed Patroni is renaming data directory, pg_wal, and all tablespaces. 
After that it updates symlinks so filesystem remains consistent. The symlink creation was failing due to the ``src`` and ``dst`` arguments being swapped. - Fixed bug in the post_bootstrap() method (Alexander Kukushkin) If the superuser password wasn't configured Patroni was failing to call the ``post_init`` script and therefore the whole bootstrap was failing. - Fixed an issue with pg_rewind in the standby cluster (Alexander Kukushkin) If the superuser name is different from Postgres, the ``pg_rewind`` in the standby cluster was failing because the connection string didn't contain the database name. - Exit only if authentication with Etcd v3 explicitly failed (Alexander Kukushkin) On start Patroni performs discovery of Etcd cluster topology and authenticates if necessary. It could happen that one of the etcd servers is not accessible, Patroni was trying to perform authentication on this server and failing instead of retrying with the next node. - Handle case with psutil cmdline() returning empty list (Alexander Kukushkin) Zombie processes are still the postmaster's children, but they don't have cmdline(). - Treat ``PATRONI_KUBERNETES_USE_ENDPOINTS`` environment variable as boolean (Alexander Kukushkin) Not doing so was making it impossible to disable ``kubernetes.use_endpoints`` via environment. - Improve handling of concurrent endpoint update errors (Alexander Kukushkin) Patroni will explicitly query the current endpoint object, verify that the current pod still holds the leader lock and repeat the update. Version 2.0.1 ------------- **New features** - Use ``more`` as pager in ``patronictl edit-config`` if ``less`` is not available (Pavel Golub) On Windows it would be the ``more.com``. In addition to that, ``cdiff`` was changed to ``ydiff`` in ``requirements.txt``, but ``patronictl`` still supports both for compatibility. - Added support of ``raft`` ``bind_addr`` and ``password`` (Alexander Kukushkin) ``raft.bind_addr`` might be useful when running behind NAT. 
``raft.password`` enables traffic encryption (requires the ``cryptography`` module). - Added ``sslpassword`` connection parameter support (Kostiantyn Nemchenko) The connection parameter was introduced in PostgreSQL 13. **Stability improvements** - Changed the behavior in pause (Alexander Kukushkin) 1. Patroni will not call the ``bootstrap`` method if the ``PGDATA`` directory is missing/empty. 2. Patroni will not exit on sysid mismatch in pause, only log a warning. 3. The node will not try to grab the leader key in pause mode if Postgres is running not in recovery (accepting writes) but the sysid doesn't match with the initialize key. - Apply ``master_start_timeout`` when executing crash recovery (Alexander Kukushkin) If Postgres crashed on the leader node, Patroni does a crash-recovery by starting Postgres in single-user mode. During the crash-recovery the leader lock is being updated. If the crash-recovery didn't finish in ``master_start_timeout`` seconds, Patroni will stop it forcefully and release the leader lock. - Removed the ``secure`` extra from the ``urllib3`` requirements (Alexander Kukushkin) The only reason for adding it there was the ``ipaddress`` dependency for python 2.7. **Bugfixes** - Fixed a bug in the ``Kubernetes.update_leader()`` (Alexander Kukushkin) An unhandled exception was preventing demoting the primary when the update of the leader object failed. - Fixed hanging ``patronictl`` when RAFT is being used (Alexander Kukushkin) When using ``patronictl`` with Patroni config, ``self_addr`` should be added to the ``partner_addrs``. - Fixed bug in ``get_guc_value()`` (Alexander Kukushkin) Patroni was failing to get the value of ``restore_command`` on PostgreSQL 12, therefore fetching missing WALs for ``pg_rewind`` didn't work. 
Version 2.0.0 ------------- This version enhances compatibility with PostgreSQL 13, adds support of multiple synchronous standbys, has significant improvements in handling of ``pg_rewind``, adds support of Etcd v3 and Patroni on pure RAFT (without Etcd, Consul, or Zookeeper), and makes it possible to optionally call the ``pre_promote`` (fencing) script. **PostgreSQL 13 support** - Don't fire ``on_reload`` when promoting to ``standby_leader`` on PostgreSQL 13+ (Alexander Kukushkin) When promoting to ``standby_leader`` we change ``primary_conninfo``, update the role and reload Postgres. Since ``on_role_change`` and ``on_reload`` effectively duplicate each other, Patroni will call only ``on_role_change``. - Added support for ``gssencmode`` and ``channel_binding`` connection parameters (Alexander Kukushkin) PostgreSQL 12 introduced ``gssencmode`` and 13 ``channel_binding`` connection parameters and now they can be used if defined in the ``postgresql.authentication`` section. - Handle renaming of ``wal_keep_segments`` to ``wal_keep_size`` (Alexander Kukushkin) In case of misconfiguration (``wal_keep_segments`` on 13 and ``wal_keep_size`` on older versions) Patroni will automatically adjust the configuration. - Use ``pg_rewind`` with ``--restore-target-wal`` on 13 if possible (Alexander Kukushkin) On PostgreSQL 13 Patroni checks if ``restore_command`` is configured and tells ``pg_rewind`` to use it. **New features** - [BETA] Implemented support of Patroni on pure RAFT (Alexander Kukushkin) This makes it possible to run Patroni without 3rd party dependencies, like Etcd, Consul, or Zookeeper. For HA you will have to run either three Patroni nodes or two nodes with Patroni and one node with ``patroni_raft_controller``. For more information please check the :ref:`documentation `. - [BETA] Implemented support for Etcd v3 protocol via gPRC-gateway (Alexander Kukushkin) Etcd 3.0 was released more than four years ago and Etcd 3.4 has v2 disabled by default. 
There are also chances that v2 will be completely removed from Etcd, therefore we implemented support of Etcd v3 in Patroni. In order to start using it you have to explicitly create the ``etcd3`` section in the Patroni configuration file. - Supporting multiple synchronous standbys (Krishna Sarabu) It allows running a cluster with more than one synchronous replica. The maximum number of synchronous replicas is controlled by the new parameter ``synchronous_node_count``. It is set to 1 by default and has no effect when the ``synchronous_mode`` is set to ``off``. - Added possibility to call the ``pre_promote`` script (Sergey Dudoladov) Unlike callbacks, the ``pre_promote`` script is called synchronously after acquiring the leader lock, but before promoting Postgres. If the script fails or exits with a non-zero exitcode, the current node will release the leader lock. - Added support for configuration directories (Floris van Nee) YAML files in the directory are loaded and applied in alphabetical order. - Advanced validation of PostgreSQL parameters (Alexander Kukushkin) In case the specific parameter is not supported by the current PostgreSQL version or when its value is incorrect, Patroni will remove the parameter completely or try to fix the value. - Wake up the main thread when the forced checkpoint after promote completed (Alexander Kukushkin) Replicas are waiting for checkpoint indication via member key of the leader in DCS. The key is normally updated only once per HA loop. Without waking the main thread up, replicas will have to wait up to ``loop_wait`` seconds longer than necessary. - Use of ``pg_stat_wal_receiver`` view on 9.6+ (Alexander Kukushkin) The view contains up-to-date values of ``primary_conninfo`` and ``primary_slot_name``, while the contents of ``recovery.conf`` could be stale. 
- Improved handling of IPv6 addresses in the Patroni config file (Mateusz Kowalski) The IPv6 address is supposed to be enclosed into square brackets, but Patroni was expecting to get it plain. Now both formats are supported. - Added Consul ``service_tags`` configuration parameter (Robert Edström) They are useful for dynamic service discovery, for example by load balancers. - Implemented SSL support for Zookeeper (Kostiantyn Nemchenko) It requires ``kazoo>=2.6.0``. - Implemented ``no_params`` option for custom bootstrap method (Kostiantyn Nemchenko) It allows calling ``wal-g``, ``pgBackRest`` and other backup tools without wrapping them into shell scripts. - Move WAL and tablespaces after a failed init (Feike Steenbergen) When doing ``reinit``, Patroni was already removing not only ``PGDATA`` but also the symlinked WAL directory and tablespaces. Now the ``move_data_directory()`` method will do a similar job, i.e. rename WAL directory and tablespaces and update symlinks in PGDATA. **Improvements in pg_rewind support** - Improved timeline divergence check (Alexander Kukushkin) We don't need to rewind when the replayed location on the replica is not ahead of the switchpoint or the end of the checkpoint record on the former primary is the same as the switchpoint. In order to get the end of the checkpoint record we use ``pg_waldump`` and parse its output. - Try to fetch missing WAL if ``pg_rewind`` complains about it (Alexander Kukushkin) It could happen that the WAL segment required for ``pg_rewind`` doesn't exist in the ``pg_wal`` directory anymore and therefore ``pg_rewind`` can't find the checkpoint location before the divergence point. Starting from PostgreSQL 13 ``pg_rewind`` could use ``restore_command`` for fetching missing WALs. For older PostgreSQL versions Patroni parses the errors of a failed rewind attempt and tries to fetch the missing WAL by calling the ``restore_command`` on its own. 
- Detect a new timeline in the standby cluster and trigger rewind/reinitialize if necessary (Alexander Kukushkin) The ``standby_cluster`` is decoupled from the primary cluster and therefore doesn't immediately know about leader elections and timeline switches. In order to detect the fact, the ``standby_leader`` periodically checks for new history files in ``pg_wal``. - Shorten and beautify history log output (Alexander Kukushkin) When Patroni is trying to figure out the necessity of ``pg_rewind``, it could write the content of the history file from the primary into the log. The history file is growing with every failover/switchover and eventually starts taking up too many lines, most of which are not so useful. Instead of showing the raw data, Patroni will show only 3 lines before the current replica timeline and 2 lines after. **Improvements on K8s** - Get rid of ``kubernetes`` python module (Alexander Kukushkin) The official python kubernetes client contains a lot of auto-generated code and therefore very heavy. Patroni uses only a small fraction of K8s API endpoints and implementing support for them wasn't hard. - Make it possible to bypass the ``kubernetes`` service (Alexander Kukushkin) When running on K8s, Patroni is usually communicating with the K8s API via the ``kubernetes`` service, the address of which is exposed in the ``KUBERNETES_SERVICE_HOST`` environment variable. Like any other service, the ``kubernetes`` service is handled by ``kube-proxy``, which in turn, depending on the configuration, is either relying on a userspace program or ``iptables`` for traffic routing. Skipping the intermediate component and connecting directly to the K8s master nodes allows us to implement a better retry strategy and mitigate risks of demoting Postgres when K8s master nodes are upgraded. - Sync HA loops of all pods of a Patroni cluster (Alexander Kukushkin) Not doing so was increasing failure detection time from ``ttl`` to ``ttl + loop_wait``. 
- Populate ``references`` and ``nodename`` in the subsets addresses on K8s (Alexander Kukushkin) Some load-balancers are relying on this information. - Fix possible race conditions in the ``update_leader()`` (Alexander Kukushkin) The concurrent update of the leader configmap or endpoint happening outside of Patroni might cause the ``update_leader()`` call to fail. In this case Patroni rechecks that the current node is still owning the leader lock and repeats the update. - Explicitly disallow patching non-existent config (Alexander Kukushkin) For DCS other than ``kubernetes`` the PATCH call is failing with an exception due to ``cluster.config`` being ``None``, but on Kubernetes it was happily creating the config annotation and preventing writing bootstrap configuration after the bootstrap finished. - Fix bug in ``pause`` (Alexander Kukushkin) Replicas were removing ``primary_conninfo`` and restarting Postgres when the leader key was absent, but they should do nothing. **Improvements in REST API** - Defer TLS handshake until worker thread has started (Alexander Kukushkin, Ben Harris) If the TLS handshake was done in the API thread and the client-side didn't send any data, the API thread was blocked (risking DoS). - Check ``basic-auth`` independently from client certificate in REST API (Alexander Kukushkin) Previously only the client certificate was validated. Doing two checks independently is an absolutely valid use-case. - Write double ``CRLF`` after HTTP headers of the ``OPTIONS`` request (Sergey Burladyan) HAProxy was happy with a single ``CRLF``, while Consul health-check complained about broken connection and unexpected EOF. - ``GET /cluster`` was showing stale members info for Zookeeper (Alexander Kukushkin) The endpoint was using the Patroni internal cluster view. For Patroni itself it didn't cause any issues, but when exposed to the outside world we need to show up-to-date information, especially replication lag. 
- Fixed health-checks for standby cluster (Alexander Kukushkin) The ``GET /standby-leader`` for a master and ``GET /master`` for a ``standby_leader`` were incorrectly responding with 200. - Implemented ``DELETE /switchover`` (Alexander Kukushkin) The REST API call deletes the scheduled switchover. - Created ``/readiness`` and ``/liveness`` endpoints (Alexander Kukushkin) They could be useful to eliminate "unhealthy" pods from subsets addresses when the K8s service is used with label selectors. - Enhanced ``GET /replica`` and ``GET /async`` REST API health-checks (Krishna Sarabu, Alexander Kukushkin) Checks now support optional keyword ``?lag=`` and will respond with 200 only if the lag is smaller than the supplied value. If relying on this feature please keep in mind that information about WAL position on the leader is updated only every ``loop_wait`` seconds! - Added support for user defined HTTP headers in the REST API response (Yogesh Sharma) This feature might be useful if requests are made from a browser. **Improvements in patronictl** - Don't try to call non-existing leader in ``patronictl pause`` (Alexander Kukushkin) While pausing a cluster without a leader on K8s, ``patronictl`` was showing warnings that member "None" could not be accessed. - Handle the case when member ``conn_url`` is missing (Alexander Kukushkin) On K8s it is possible that the pod doesn't have the necessary annotations because Patroni is not yet running. It was making ``patronictl`` to fail. - Added ability to print ASCII cluster topology (Maxim Fedotov, Alexander Kukushkin) It is very useful to get overview of the cluster with cascading replication. - Implement ``patronictl flush switchover`` (Alexander Kukushkin) Before that ``patronictl flush`` only supported cancelling scheduled restarts. 
**Bugfixes** - Attribute error during bootstrap of the cluster with existing PGDATA (Krishna Sarabu) When trying to create/update the ``/history`` key, Patroni was accessing the ``ClusterConfig`` object which wasn't created in DCS yet. - Improved exception handling in Consul (Alexander Kukushkin) Unhandled exception in the ``touch_member()`` method caused the whole Patroni process to crash. - Enforce ``synchronous_commit=local`` for the ``post_init`` script (Alexander Kukushkin) Patroni was already doing that when creating users (``replication``, ``rewind``), but missing it in the case of ``post_init`` was an oversight. As a result, if the script wasn't doing it internally on its own the bootstrap in ``synchronous_mode`` wasn't able to finish. - Increased ``maxsize`` in the Consul pool manager (ponvenkates) With the default ``size=1`` some warnings were generated. - Patroni was wrongly reporting Postgres as running (Alexander Kukushkin) The state wasn't updated when for example Postgres crashed due to an out-of-disk error. - Put ``*`` into ``pgpass`` instead of missing or empty values (Alexander Kukushkin) If for example the ``standby_cluster.port`` is not specified, the ``pgpass`` file was incorrectly generated. - Skip physical replication slot creation on the leader node with special characters (Krishna Sarabu) Patroni appeared to be creating a dormant slot (when ``slots`` defined) for the leader node when the name contained special chars such as '-' (e.g. "abc-us-1"). - Avoid removing non-existent ``pg_hba.conf`` in the custom bootstrap (Krishna Sarabu) Patroni was failing if ``pg_hba.conf`` happened to be located outside of the ``pgdata`` dir after custom bootstrap. Version 1.6.5 ------------- **New features** - Master stop timeout (Krishna Sarabu) The number of seconds Patroni is allowed to wait when stopping Postgres. Effective only when ``synchronous_mode`` is enabled. 
When set to value greater than 0 and the ``synchronous_mode`` is enabled, Patroni sends ``SIGKILL`` to the postmaster if the stop operation is running for more than the value set by ``master_stop_timeout``. Set the value according to your durability/availability tradeoff. If the parameter is not set or set to non-positive value, ``master_stop_timeout`` does not have an effect. - Don't create permanent physical slot with name of the primary (Alexander Kukushkin) It is a common problem that the primary recycles WAL segments while the replica is down. Now we have a good solution for static clusters, with a fixed number of nodes and names that never change. You just need to list the names of all nodes in the ``slots`` so the primary will not remove the slot when the node is down (not registered in DCS). - First draft of Config Validator (Igor Yanchenko) Use ``patroni --validate-config patroni.yaml`` in order to validate Patroni configuration. - Possibility to configure max length of timelines history (Krishna Sarabu) Patroni writes the history of failovers/switchovers into the ``/history`` key in DCS. Over time the size of this key becomes big, but in most cases only the last few lines are interesting. The ``max_timelines_history`` parameter allows to specify the maximum number of timeline history items to be kept in DCS. - Kazoo 2.7.0 compatibility (Danyal Prout) Some non-public methods in Kazoo changed their signatures, but Patroni was relying on them. **Improvements in patronictl** - Show member tags (Kostiantyn Nemchenko, Alexander Kukushkin) Tags are configured individually for every node and there was no easy way to get an overview of them - Improve members output (Alexander Kukushkin) The redundant cluster name won't be shown anymore on every line, only in the table header. .. 
code-block:: bash $ patronictl list + Cluster: batman (6813309862653668387) +---------+----+-----------+---------------------+ | Member | Host | Role | State | TL | Lag in MB | Tags | +-------------+----------------+--------+---------+----+-----------+---------------------+ | postgresql0 | 127.0.0.1:5432 | Leader | running | 3 | | clonefrom: true | | | | | | | | noloadbalance: true | | | | | | | | nosync: true | +-------------+----------------+--------+---------+----+-----------+---------------------+ | postgresql1 | 127.0.0.1:5433 | | running | 3 | 0.0 | | +-------------+----------------+--------+---------+----+-----------+---------------------+ - Fail if a config file is specified explicitly but not found (Kaarel Moppel) Previously ``patronictl`` was only reporting a ``DEBUG`` message. - Solved the problem of not initialized K8s pod breaking patronictl (Alexander Kukushkin) Patroni is relying on certain pod annotations on K8s. When one of the Patroni pods is stopping or starting there is no valid annotation yet and ``patronictl`` was failing with an exception. **Stability improvements** - Apply 1 second backoff if LIST call to K8s API server failed (Alexander Kukushkin) It is mostly necessary to avoid flooding logs, but also helps to prevent starvation of the main thread. - Retry if the ``retry-after`` HTTP header is returned by K8s API (Alexander Kukushkin) If the K8s API server is overwhelmed with requests it might ask to retry. - Scrub ``KUBERNETES_`` environment from the postmaster (Feike Steenbergen) The ``KUBERNETES_`` environment variables are not required for PostgreSQL, yet having them exposed to the postmaster will also expose them to backends and to regular database users (using pl/perl for example). - Clean up tablespaces on reinitialize (Krishna Sarabu) During reinit, Patroni was removing only ``PGDATA`` and leaving user-defined tablespace directories. This is causing Patroni to loop in reinit. 
The previous workaround for the problem was implementing the :ref:`custom bootstrap ` script. - Explicitly execute ``CHECKPOINT`` after promote happened (Alexander Kukushkin) It helps to reduce the time before the new primary is usable for ``pg_rewind``. - Smart refresh of Etcd members (Alexander Kukushkin) In case Patroni failed to execute a request on all members of the Etcd cluster, Patroni will re-check ``A`` or ``SRV`` records for changes of IPs/hosts before retrying the next time. - Skip missing values from ``pg_controldata`` (Feike Steenbergen) Values are missing when trying to use binaries of a version that doesn't match PGDATA. Patroni will try to start Postgres anyway, and Postgres will complain that the major version doesn't match and abort with an error. **Bugfixes** - Disable SSL verification for Consul when required (Julien Riou) Starting from a certain version of ``urllib3``, the ``cert_reqs`` must be explicitly set to ``ssl.CERT_NONE`` in order to effectively disable SSL verification. - Avoid opening replication connection on every cycle of HA loop (Alexander Kukushkin) Regression was introduced in 1.6.4. - Call ``on_role_change`` callback on failed primary (Alexander Kukushkin) In certain cases it could lead to the virtual IP remaining attached to the old primary. Regression was introduced in 1.4.5. - Reset rewind state if postgres started after successful pg_rewind (Alexander Kukushkin) As a result of this bug Patroni was starting up manually shut down postgres in the pause mode. - Convert ``recovery_min_apply_delay`` to ``ms`` when checking ``recovery.conf`` Patroni was indefinitely restarting replica if ``recovery_min_apply_delay`` was configured on PostgreSQL older than 12. - PyInstaller compatibility (Alexander Kukushkin) PyInstaller freezes (packages) Python applications into stand-alone executables. The compatibility was broken when we switched to the ``spawn`` method instead of ``fork`` for ``multiprocessing``. 
Version 1.6.4 ------------- **New features** - Implemented ``--wait`` option for ``patronictl reinit`` (Igor Yanchenko) Patronictl will wait for ``reinit`` to finish if the ``--wait`` option is used. - Further improvements of Windows support (Igor Yanchenko, Alexander Kukushkin) 1. All shell scripts which are used for integration testing are rewritten in python 2. The ``pg_ctl kill`` will be used to stop postgres on non posix systems 3. Don't try to use unix-domain sockets **Stability improvements** - Make sure ``unix_socket_directories`` and ``stats_temp_directory`` exist (Igor Yanchenko) Upon the start of Patroni and Postgres make sure that ``unix_socket_directories`` and ``stats_temp_directory`` exist or try to create them. Patroni will exit if failed to create them. - Make sure ``postgresql.pgpass`` is located in the place where Patroni has write access (Igor Yanchenko) In case if it doesn't have a write access Patroni will exit with exception. - Disable Consul ``serfHealth`` check by default (Kostiantyn Nemchenko) Even in case of little network problems the failing ``serfHealth`` leads to invalidation of all sessions associated with the node. Therefore, the leader key is lost much earlier than ``ttl`` which causes unwanted restarts of replicas and maybe demotion of the primary. - Configure tcp keepalives for connections to K8s API (Alexander Kukushkin) In case if we get nothing from the socket after TTL seconds it can be considered dead. - Avoid logging of passwords on user creation (Alexander Kukushkin) If the password is rejected or logging is configured to verbose or not configured at all it might happen that the password is written into postgres logs. In order to avoid it Patroni will change ``log_statement``, ``log_min_duration_statement``, and ``log_min_error_statement`` to some safe values before doing the attempt to create/update user. 
**Bugfixes** - Use ``restore_command`` from the ``standby_cluster`` config on cascading replicas (Alexander Kukushkin) The ``standby_leader`` was already doing it from the beginning the feature existed. Not doing the same on replicas might prevent them from catching up with standby leader. - Update timeline reported by the standby cluster (Alexander Kukushkin) In case of timeline switch the standby cluster was correctly replicating from the primary but ``patronictl`` was reporting the old timeline. - Allow certain recovery parameters be defined in the custom_conf (Alexander Kukushkin) When doing validation of recovery parameters on replica Patroni will skip ``archive_cleanup_command``, ``promote_trigger_file``, ``recovery_end_command``, ``recovery_min_apply_delay``, and ``restore_command`` if they are not defined in the patroni config but in files other than ``postgresql.auto.conf`` or ``postgresql.conf``. - Improve handling of postgresql parameters with period in its name (Alexander Kukushkin) Such parameters could be defined by extensions where the unit is not necessarily a string. Changing the value might require a restart (for example ``pg_stat_statements.max``). - Improve exception handling during shutdown (Alexander Kukushkin) During shutdown Patroni is trying to update its status in the DCS. If the DCS is inaccessible an exception might be raised. Lack of exception handling was preventing logger thread from stopping. Version 1.6.3 ------------- **Bugfixes** - Don't expose password when running ``pg_rewind`` (Alexander Kukushkin) Bug was introduced in the `#1301 `__ - Apply connection parameters specified in the ``postgresql.authentication`` to ``pg_basebackup`` and custom replica creation methods (Alexander Kukushkin) They were relying on url-like connection string and therefore parameters never applied. Version 1.6.2 ------------- **New features** - Implemented ``patroni --version`` (Igor Yanchenko) It prints the current version of Patroni and exits. 
- Set the ``user-agent`` http header for all http requests (Alexander Kukushkin) Patroni is communicating with Consul, Etcd, and Kubernetes API via the http protocol. Having a specifically crafted ``user-agent`` (example: ``Patroni/1.6.2 Python/3.6.8 Linux``) might be useful for debugging and monitoring. - Make it possible to configure log level for exception tracebacks (Igor Yanchenko) If you set ``log.traceback_level=DEBUG`` the tracebacks will be visible only when ``log.level=DEBUG``. The default behavior remains the same. **Stability improvements** - Avoid importing all DCS modules when searching for the module required by the config file (Alexander Kukushkin) There is no need to import modules for Etcd, Consul, and Kubernetes if we need only e.g. Zookeeper. It helps to reduce memory usage and solves the problem of having INFO messages ``Failed to import smth``. - Removed python ``requests`` module from explicit requirements (Alexander Kukushkin) It wasn't used for anything critical, but causing a lot of problems when the new version of ``urllib3`` is released. - Improve handling of ``etcd.hosts`` written as a comma-separated string instead of YAML array (Igor Yanchenko) Previously it was failing when written in format ``host1:port1, host2:port2`` (the space character after the comma). **Usability improvements** - Don't force users to choose members from an empty list in ``patronictl`` (Igor Yanchenko) If the user provides a wrong cluster name, we will raise an exception rather than ask to choose a member from an empty list. - Make the error message more helpful if the REST API cannot bind (Igor Yanchenko) For an inexperienced user it might be hard to figure out what is wrong from the Python stacktrace. **Bugfixes** - Fix calculation of ``wal_buffers`` (Alexander Kukushkin) The base unit has been changed from 8 kB blocks to bytes in PostgreSQL 11. 
- Use ``passfile`` in ``primary_conninfo`` only on PostgreSQL 10+ (Alexander Kukushkin) On older versions there is no guarantee that ``passfile`` will work, unless the latest version of ``libpq`` is installed. Version 1.6.1 ------------- **New features** - Added ``PATRONICTL_CONFIG_FILE`` environment variable (msvechla) It allows configuring the ``--config-file`` argument for ``patronictl`` from the environment. - Implement ``patronictl history`` (Alexander Kukushkin) It shows the history of failovers/switchovers. - Pass ``-c statement_timeout=0`` in ``PGOPTIONS`` when doing ``pg_rewind`` (Alexander Kukushkin) It protects from the case when ``statement_timeout`` on the server is set to some small value and one of the statements executed by pg_rewind is canceled. - Allow lower values for PostgreSQL configuration (Soulou) Patroni didn't allow some of the PostgreSQL configuration parameters be set smaller than some hardcoded values. Now the minimal allowed values are smaller, default values have not been changed. - Allow for certificate-based authentication (Jonathan S. Katz) This feature enables certificate-based authentication for superuser, replication, rewind accounts and allows the user to specify the ``sslmode`` they wish to connect with. - Use the ``passfile`` in the ``primary_conninfo`` instead of password (Alexander Kukushkin) It allows to avoid setting ``600`` permissions on postgresql.conf - Perform ``pg_ctl reload`` regardless of config changes (Alexander Kukushkin) It is possible that some config files are not controlled by Patroni. When somebody is doing a reload via the REST API or by sending SIGHUP to the Patroni process, the usual expectation is that Postgres will also be reloaded. Previously it didn't happen when there were no changes in the ``postgresql`` section of Patroni config. 
- Compare all recovery parameters, not only ``primary_conninfo`` (Alexander Kukushkin) Previously the ``check_recovery_conf()`` method was only checking whether ``primary_conninfo`` has changed, never taking into account all other recovery parameters. - Make it possible to apply some recovery parameters without restart (Alexander Kukushkin) Starting from PostgreSQL 12 the following recovery parameters could be changed without restart: ``archive_cleanup_command``, ``promote_trigger_file``, ``recovery_end_command``, and ``recovery_min_apply_delay``. In future Postgres releases this list will be extended and Patroni will support it automatically. - Make it possible to change ``use_slots`` online (Alexander Kukushkin) Previously it required restarting Patroni and removing slots manually. - Remove only ``PATRONI_`` prefixed environment variables when starting up Postgres (Cody Coons) It will solve a lot of problems with running different Foreign Data Wrappers. **Stability improvements** - Use LIST + WATCH when working with K8s API (Alexander Kukushkin) It allows to efficiently receive object changes (pods, endpoints/configmaps) and makes less stress on K8s master nodes. - Improve the workflow when PGDATA is not empty during bootstrap (Alexander Kukushkin) According to the ``initdb`` source code it might consider a PGDATA empty when there are only ``lost+found`` and ``.dotfiles`` in it. Now Patroni does the same. If ``PGDATA`` happens to be non-empty, and at the same time not valid from the ``pg_controldata`` point of view, Patroni will complain and exit. - Avoid calling expensive ``os.listdir()`` on every HA loop (Alexander Kukushkin) When the system is under IO stress, ``os.listdir()`` could take a few seconds (or even minutes) to execute, badly affecting the HA loop of Patroni. This could even cause the leader key to disappear from DCS due to the lack of updates. There is a better and less expensive way to check that the PGDATA is not empty. 
Now we check the presence of the ``global/pg_control`` file in the PGDATA. - Some improvements in logging infrastructure (Alexander Kukushkin) Previously there was a possibility to lose the last few log lines on shutdown because the logging thread was a ``daemon`` thread. - Use ``spawn`` multiprocessing start method on python 3.4+ (Maciej Kowalczyk) It is a known `issue `__ in Python that threading and multiprocessing do not mix well. Switching from the default method ``fork`` to the ``spawn`` is a recommended workaround. Not doing so might result in the Postmaster starting process hanging and Patroni indefinitely reporting ``INFO: restarting after failure in progress``, while Postgres is actually up and running. **Improvements in REST API** - Make it possible to check client certificates in the REST API (Alexander Kukushkin) If the ``verify_client`` is set to ``required``, Patroni will check client certificates for all REST API calls. When it is set to ``optional``, client certificates are checked for all unsafe REST API endpoints. - Return the response code 503 for the ``GET /replica`` health check request if Postgres is not running (Alexander Anikin) Postgres might spend significant time in recovery before it starts accepting client connections. - Implement ``/history`` and ``/cluster`` endpoints (Alexander Kukushkin) The ``/history`` endpoint shows the content of the ``history`` key in DCS. The ``/cluster`` endpoint shows all cluster members and some service info like pending and scheduled restarts or switchovers. **Improvements in Etcd support** - Retry on Etcd RAFT internal error (Alexander Kukushkin) When the Etcd node is being shut down, it sends ``response code=300, data='etcdserver: server stopped'``, which was causing Patroni to demote the primary. 
- Don't give up on Etcd request retry too early (Alexander Kukushkin) When there were some network problems, Patroni was quickly exhausting the list of Etcd nodes and giving up without using the whole ``retry_timeout``, potentially resulting in demoting the primary. **Bugfixes** - Disable ``synchronous_commit`` when granting execute permissions to the ``pg_rewind`` user (kremius) If the bootstrap is done with ``synchronous_mode_strict: true`` the `GRANT EXECUTE` statement was waiting indefinitely due to the non-synchronous nodes being unavailable. - Fix memory leak on python 3.7 (Alexander Kukushkin) Patroni is using ``ThreadingMixIn`` to process REST API requests and python 3.7 made threads spawn for every request non-daemon by default. - Fix race conditions in asynchronous actions (Alexander Kukushkin) There was a chance that ``patronictl reinit --force`` could be overwritten by the attempt to recover stopped Postgres. This ended up in a situation when Patroni was trying to start Postgres while basebackup was running. - Fix race condition in ``postmaster_start_time()`` method (Alexander Kukushkin) If the method is executed from the REST API thread, it requires a separate cursor object to be created. - Fix the problem of not promoting the sync standby that had a name containing upper case letters (Alexander Kukushkin) We converted the name to the lower case because Postgres was doing the same while comparing the ``application_name`` with the value in ``synchronous_standby_names``. - Kill all children along with the callback process before starting the new one (Alexander Kukushkin) Not doing so makes it hard to implement callbacks in bash and eventually can lead to the situation when two callbacks are running at the same time. - Fix 'start failed' issue (Alexander Kukushkin) Under certain conditions the Postgres state might be set to 'start failed' despite Postgres being up and running. 
Version 1.6.0 ------------- This version adds compatibility with PostgreSQL 12, makes it possible to run pg_rewind without superuser on PostgreSQL 11 and newer, and enables IPv6 support. **New features** - Psycopg2 was removed from requirements and must be installed independently (Alexander Kukushkin) Starting from 2.8.0 ``psycopg2`` was split into two different packages, ``psycopg2``, and ``psycopg2-binary``, which could be installed at the same time into the same place on the filesystem. In order to decrease dependency hell problem, we let a user choose how to install it. There are a few options available, please consult the :ref:`documentation `. - Compatibility with PostgreSQL 12 (Alexander Kukushkin) Starting from PostgreSQL 12 there is no ``recovery.conf`` anymore and all former recovery parameters are converted into `GUC `_. In order to protect from ``ALTER SYSTEM SET primary_conninfo`` or similar, Patroni will parse ``postgresql.auto.conf`` and remove all standby and recovery parameters from there. Patroni config remains backward compatible. For example despite ``restore_command`` being a GUC, one can still specify it in the ``postgresql.recovery_conf.restore_command`` section and Patroni will write it into ``postgresql.conf`` for PostgreSQL 12. - Make it possible to use ``pg_rewind`` without superuser on PostgreSQL 11 and newer (Alexander Kukushkin) If you want to use this feature please define ``username`` and ``password`` in the ``postgresql.authentication.rewind`` section of Patroni configuration file. For an already existing cluster you will have to create the user manually and ``GRANT EXECUTE`` permission on a few functions. You can find more details in the PostgreSQL `documentation `__. 
- Do a smart comparison of actual and desired ``primary_conninfo`` values on replicas (Alexander Kukushkin) It might help to avoid replica restart when you are converting an already existing primary-standby cluster to one managed by Patroni - IPv6 support (Alexander Kukushkin) There were two major issues. Patroni REST API service was listening only on ``0.0.0.0`` and IPv6 IP addresses used in the ``api_url`` and ``conn_url`` were not properly quoted. - Kerberos support (Ajith Vilas, Alexander Kukushkin) It makes possible using Kerberos authentication between Postgres nodes instead of defining passwords in Patroni configuration file - Manage ``pg_ident.conf`` (Alexander Kukushkin) This functionality works similarly to ``pg_hba.conf``: if the ``postgresql.pg_ident`` is defined in the config file or DCS, Patroni will write its value to ``pg_ident.conf``, however, if ``postgresql.parameters.ident_file`` is defined, Patroni will assume that ``pg_ident`` is managed from outside and not update the file. **Improvements in REST API** - Added ``/health`` endpoint (Wilfried Roset) It will return an HTTP status code only if PostgreSQL is running - Added ``/read-only`` and ``/read-write`` endpoints (Julien Riou) The ``/read-only`` endpoint enables reads balanced across replicas and the primary. The ``/read-write`` endpoint is an alias for ``/primary``, ``/leader`` and ``/master``. - Use ``SSLContext`` to wrap the REST API socket (Julien Riou) Usage of ``ssl.wrap_socket()`` is deprecated and was still allowing soon-to-be-deprecated protocols like TLS 1.1. **Logging improvements** - Two-step logging (Alexander Kukushkin) All log messages are first written into the in-memory queue and later they are asynchronously flushed into the stderr or file from a separate thread. The maximum queue size is limited (configurable). If the limit is reached, Patroni will start losing logs, which is still better than blocking the HA loop. 
- Enable debug logging for GET/OPTIONS API calls together with latency (Jan Tomsa) It will help with debugging of health-checks performed by HAProxy, Consul or other tooling that decides which node is the primary/replica. - Log exceptions caught in Retry (Daniel Kucera) Log the final exception when either the number of attempts or the timeout were reached. It will hopefully help to debug some issues when communication to DCS fails. **Improvements in patronictl** - Enhance dialogues for scheduled switchover and restart (Rafia Sabih) Previously dialogues did not take into account scheduled actions and therefore were misleading. - Check if config file exists (Wilfried Roset) Be verbose about configuration file when the given filename does not exist, instead of ignoring silently (which can lead to misunderstanding). - Add fallback value for ``EDITOR`` (Wilfried Roset) When the ``EDITOR`` environment variable was not defined, ``patronictl edit-config`` was failing with `PatroniCtlException`. The new strategy is to try ``editor`` and then ``vi``, which should be available on most systems. **Improvements in Consul support** - Allow to specify Consul consistency mode (Jan Tomsa) You can read more about consistency mode `here `__. - Reload Consul config on SIGHUP (Cameron Daniel Kucera, Alexander Kukushkin) It is especially useful when somebody is changing the value of ``token``. **Bugfixes** - Fix corner case in switchover/failover (Sharoon Thomas) The variable ``scheduled_at`` may be undefined if REST API is not accessible and we are using DCS as a fallback. 
- Open trust to localhost in ``pg_hba.conf`` during custom bootstrap (Alexander Kukushkin) Previously it was open only to unix_socket, which was causing a lot of errors: ``FATAL: no pg_hba.conf entry for replication connection from host "127.0.0.1", user "replicator"`` - Consider synchronous node as healthy even when the former leader is ahead (Alexander Kukushkin) If the primary loses access to the DCS, it restarts Postgres in read-only, but it might happen that other nodes can still access the old primary via the REST API. Such a situation was causing the synchronous standby not to promote because the old primary was reporting WAL position ahead of the synchronous standby. - Standby cluster bugfixes (Alexander Kukushkin) Make it possible to bootstrap a replica in a standby cluster when the standby_leader is not accessible and a few other minor fixes. Version 1.5.6 ------------- **New features** - Support work with etcd cluster via set of proxies (Alexander Kukushkin) It might happen that etcd cluster is not accessible directly but via set of proxies. In this case Patroni will not perform etcd topology discovery but just round-robin via proxy hosts. Behavior is controlled by `etcd.use_proxies`. - Changed callbacks behavior when role on the node is changed (Alexander Kukushkin) If the role was changed from `master` or `standby_leader` to `replica` or from `replica` to `standby_leader`, `on_restart` callback will not be called anymore in favor of `on_role_change` callback. - Change the way how we start postgres (Alexander Kukushkin) Use `multiprocessing.Process` instead of executing itself and `multiprocessing.Pipe` to transmit the postmaster pid to the Patroni process. Before that we were using pipes, what was leaving postmaster process with stdin closed. 
**Bug fixes** - Fix role returned by REST API for the standby leader (Alexander Kukushkin) It was incorrectly returning `replica` instead of `standby_leader` - Wait for callback end if it could not be killed (Julien Tachoires) Patroni doesn't have enough privileges to terminate the callback script running under `sudo` what was cancelling the new callback. If the running script could not be killed, Patroni will wait until it finishes and then run the next callback. - Reduce lock time taken by dcs.get_cluster method (Alexander Kukushkin) Due to the lock being held DCS slowness was affecting the REST API health checks causing false positives. - Improve cleaning of PGDATA when `pg_wal`/`pg_xlog` is a symlink (Julien Tachoires) In this case Patroni will explicitly remove files from the target directory. - Remove unnecessary usage of os.path.relpath (Ants Aasma) It depends on being able to resolve the working directory, what will fail if Patroni is started in a directory that is later unlinked from the filesystem. - Do not enforce ssl version when communicating with Etcd (Alexander Kukushkin) For some unknown reason python3-etcd on debian and ubuntu are not based on the latest version of the package and therefore it enforces TLSv1 which is not supported by Etcd v3. We solved this problem on Patroni side. Version 1.5.5 ------------- This version introduces the possibility of automatic reinit of the former master, improves patronictl list output and fixes a number of bugs. **New features** - Add support of `PATRONI_ETCD_PROTOCOL`, `PATRONI_ETCD_USERNAME` and `PATRONI_ETCD_PASSWORD` environment variables (Étienne M) Before it was possible to configure them only in the config file or as a part of `PATRONI_ETCD_URL`, which is not always convenient. - Make it possible to automatically reinit the former master (Alexander Kukushkin) If the pg_rewind is disabled or can't be used, the former master could fail to start as a new replica due to diverged timelines. 
In this case, the only way to fix it is wiping the data directory and reinitializing. This behavior could be changed by setting `postgresql.remove_data_directory_on_diverged_timelines`. When it is set, Patroni will wipe the data directory and reinitialize the former master automatically. - Show information about timelines in patronictl list (Alexander Kukushkin) It helps to detect stale replicas. In addition to that, `Host` will include ':{port}' if the port value isn't default or there is more than one member running on the same host. - Create a headless service associated with the $SCOPE-config endpoint (Alexander Kukushkin) The "config" endpoint keeps information about the cluster-wide Patroni and Postgres configuration, history file, and last but most important, it holds the `initialize` key. When the Kubernetes master node is restarted or upgraded, it removes endpoints without services. The headless service will prevent it from being removed. **Bug fixes** - Adjust the read timeout for the leader watch blocking query (Alexander Kukushkin) According to the Consul documentation, the actual response timeout is increased by a small random amount of additional wait time added to the supplied maximum wait time to spread out the wake up time of any concurrent requests. It adds up to `wait / 16` additional time to the maximum duration. In our case we are adding `wait / 15` or 1 second depending on what is bigger. - Always use replication=1 when connecting via replication protocol to the postgres (Alexander Kukushkin) Starting from Postgres 10 the line in the pg_hba.conf with database=replication doesn't accept connections with the parameter replication=database. - Don't write primary_conninfo into recovery.conf for wal-only standby cluster (Alexander Kukushkin) Despite having neither `host` nor `port` defined in the `standby_cluster` config, Patroni was putting the `primary_conninfo` into the `recovery.conf`, which is useless and generating a lot of errors. 
Version 1.5.4 ------------- This version implements flexible logging and fixes a number of bugs. **New features** - Improvements in logging infrastructure (Alexander Kukushkin, Lucas Capistrant, Alexander Anikin) Logging configuration could be configured not only from environment variables but also from Patroni config file. It makes it possible to change logging configuration in runtime by updating config and doing reload or sending SIGHUP to the Patroni process. By default Patroni writes logs to stderr, but now it becomes possible to write logs directly into the file and rotate when it reaches a certain size. In addition to that added support of custom dateformat and the possibility to fine-tune log level for each python module. - Make it possible to take into account the current timeline during leader elections (Alexander Kukushkin) It could happen that the node is considering itself as a healthiest one although it is currently not on the latest known timeline. In some cases we want to avoid promoting of such node, which could be achieved by setting `check_timeline` parameter to `true` (default behavior remains unchanged). - Relaxed requirements on superuser credentials Libpq allows opening connections without explicitly specifying neither username nor password. Depending on situation it relies either on pgpass file or trust authentication method in pg_hba.conf. Since pg_rewind is also using libpq, it will work the same way. - Implemented possibility to configure Consul Service registration and check interval via environment variables (Alexander Kukushkin) Registration of service in Consul was added in the 1.5.0, but so far it was only possible to turn it on via patroni.yaml. **Stability Improvements** - Set archive_mode to off during the custom bootstrap (Alexander Kukushkin) We want to avoid archiving wals and history files until the cluster is fully functional. It really helps if the custom bootstrap involves pg_upgrade. 
- Apply five seconds backoff when loading global config on start (Alexander Kukushkin) It helps to avoid hammering DCS when Patroni just starting up. - Reduce amount of error messages generated on shutdown (Alexander Kukushkin) They were harmless but rather annoying and sometimes scary. - Explicitly secure rw perms for recovery.conf at creation time (Lucas Capistrant) We don't want anybody except patroni/postgres user reading this file, because it contains replication user and password. - Redirect HTTPServer exceptions to logger (Julien Riou) By default, such exceptions were logged on standard output messing with regular logs. **Bug fixes** - Removed stderr pipe to stdout on pg_ctl process (Cody Coons) Inheriting stderr from the main Patroni process allows all Postgres logs to be seen along with all patroni logs. This is very useful in a container environment as Patroni and Postgres logs may be consumed using standard tools (docker logs, kubectl, etc). In addition to that, this change fixes a bug with Patroni not being able to catch postmaster pid when postgres writing some warnings into stderr. - Set Consul service check deregister timeout in Go time format (Pavel Kirillov) Without explicitly mentioned time unit registration was failing. - Relax checks of standby_cluster cluster configuration (Dmitry Dolgov, Alexander Kukushkin) It was accepting only strings as valid values and therefore it was not possible to specify the port as integer and create_replica_methods as a list. Version 1.5.3 ------------- Compatibility and bugfix release. - Improve stability when running with python3 against zookeeper (Alexander Kukushkin) Change of `loop_wait` was causing Patroni to disconnect from zookeeper and never reconnect back. 
- Fix broken compatibility with postgres 9.3 (Alexander Kukushkin) When opening a replication connection we should specify replication=1, because 9.3 does not understand replication='database' - Make sure we refresh Consul session at least once per HA loop and improve handling of consul sessions exceptions (Alexander Kukushkin) Restart of local consul agent invalidates all sessions related to the node. Not calling session refresh on time and not doing proper handling of session errors was causing demote of the primary. Version 1.5.2 ------------- Compatibility and bugfix release. - Compatibility with kazoo-2.6.0 (Alexander Kukushkin) In order to make sure that requests are performed with an appropriate timeout, Patroni redefines create_connection method from python-kazoo module. The last release of kazoo slightly changed the way how create_connection method is called. - Fix Patroni crash when Consul cluster loses the leader (Alexander Kukushkin) The crash was happening due to incorrect implementation of touch_member method, it should return boolean and not raise any exceptions. Version 1.5.1 ------------- This version implements support of permanent replication slots, adds support of pgBackRest and fixes number of bugs. **New features** - Permanent replication slots (Alexander Kukushkin) Permanent replication slots are preserved on failover/switchover, that is, Patroni on the new primary will create configured replication slots right after doing promote. Slots could be configured with the help of `patronictl edit-config`. The initial configuration could be also done in the :ref:`bootstrap.dcs `. - Add pgbackrest support (Yogesh Sharma) pgBackrest can restore in existing $PGDATA folder, this allows speedy restore as files which have not changed since last backup are skipped, to support this feature new parameter `keep_data` has been introduced. See :ref:`replica creation method ` section for additional examples. 
**Bug fixes** - A few bugfixes in the "standby cluster" workflow (Alexander Kukushkin) Please see https://github.com/zalando/patroni/pull/823 for more details. - Fix REST API health check when cluster management is paused and DCS is not accessible (Alexander Kukushkin) Regression was introduced in https://github.com/zalando/patroni/commit/90cf930036a9d5249265af15d2b787ec7517cf57 Version 1.5.0 ------------- This version enables Patroni HA cluster to operate in a standby mode, introduces experimental support for running on Windows, and provides a new configuration parameter to register PostgreSQL service in Consul. **New features** - Standby cluster (Dmitry Dolgov) One or more Patroni nodes can form a standby cluster that runs alongside the primary one (i.e. in another datacenter) and consists of standby nodes that replicate from the master in the primary cluster. All PostgreSQL nodes in the standby cluster are replicas; one of those replicas elects itself to replicate directly from the remote master, while the others replicate from it in a cascading manner. More detailed description of this feature and some configuration examples can be found at :ref:`here `. - Register Services in Consul (Pavel Kirillov, Alexander Kukushkin) If `register_service` parameter in the consul :ref:`configuration ` is enabled, the node will register a service with the name `scope` and the tag `master`, `replica` or `standby-leader`. - Experimental Windows support (Pavel Golub) From now on it is possible to run Patroni on Windows, although Windows support is brand-new and hasn't received as much real-world testing as its Linux counterpart. We welcome your feedback! **Improvements in patronictl** - Add patronictl -k/--insecure flag and support for restapi cert (Wilfried Roset) In the past if the REST API was protected by the self-signed certificates `patronictl` would fail to verify them. There was no way to disable that verification. 
It is now possible to configure `patronictl` to skip the certificate verification altogether or provide CA and client certificates in the :ref:`ctl: ` section of configuration. - Exclude members with nofailover tag from patronictl switchover/failover output (Alexander Anikin) Previously, those members were incorrectly proposed as candidates when performing interactive switchover or failover via patronictl. **Stability improvements** - Avoid parsing non-key-value output lines of pg_controldata (Alexander Anikin) Under certain circumstances pg_controldata outputs lines without a colon character. That would trigger an error in Patroni code that parsed pg_controldata output, hiding the actual problem; often such lines are emitted in a warning shown by pg_controldata before the regular output, i.e. when the binary major version does not match the one of the PostgreSQL data directory. - Add member name to the error message during the leader election (Jan Mussler) During the leader election, Patroni connects to all known members of the cluster and requests their status. Such status is written to the Patroni log and includes the name of the member. Previously, if the member was not accessible, the error message did not indicate its name, containing only the URL. - Immediately reserve the WAL position upon creation of the replication slot (Alexander Kukushkin) Starting from 9.6, `pg_create_physical_replication_slot` function provides an additional boolean parameter `immediately_reserve`. When it is set to `false`, which is also the default, the slot doesn't reserve the WAL position until it receives the first client connection, potentially losing some segments required by the client in a time window between the slot creation and the initial client connection. 
- Fix bug in strict synchronous replication (Alexander Kukushkin) When running with `synchronous_mode_strict: true`, in some cases Patroni puts `*` into the `synchronous_standby_names`, changing the sync state for most of the replication connections to `potential`. Previously, Patroni couldn't pick a synchronous candidate under such circumstances, as it only considered those with the state `async`. Version 1.4.6 ------------- **Bug fixes and stability improvements** This release fixes a critical issue with Patroni API /master endpoint returning 200 for the non-master node. This is a reporting issue, no actual split-brain, but under certain circumstances clients might be directed to the read-only node. - Reset is_leader status on demote (Alexander Kukushkin, Oleksii Kliukin) Make sure demoted cluster member stops responding with code 200 on the /master API call. - Add new "cluster_unlocked" field to the API output (Dmitry Dolgov) This field indicates whether the cluster has the master running. It can be used when it is not possible to query any other node but one of the replicas. Version 1.4.5 ------------- **New features** - Improve logging when applying new postgres configuration (Don Seiler) Patroni logs changed parameter names and values. - Python 3.7 compatibility (Christoph Berg) async is a reserved keyword in python3.7 - Set state to "stopped" in the DCS when a member is shut down (Tony Sorrentino) This shows the member state as "stopped" in "patronictl list" command. - Improve the message logged when stale postmaster.pid matches a running process (Ants Aasma) The previous one was beyond confusing. - Implement patronictl reload functionality (Don Seiler) Before that it was only possible to reload configuration by either calling REST API or by sending SIGHUP signal to the Patroni process. 
- Take and apply some parameters from controldata when starting as a replica (Alexander Kukushkin) The value of `max_connections` and some other parameters set in the global configuration may be lower than the one actually used by the primary; when this happens, the replica cannot start and should be fixed manually. Patroni takes care of that now by reading and applying the value from `pg_controldata`, starting postgres and setting `pending_restart` flag. - If set, use LD_LIBRARY_PATH when starting postgres (Chris Fraser) When starting up Postgres, Patroni was passing along PATH, LC_ALL and LANG env vars if they are set. Now it is doing the same with LD_LIBRARY_PATH. It should help if somebody installed PostgreSQL to non-standard place. - Rename create_replica_method to create_replica_methods (Dmitry Dolgov) To make it clear that it's actually an array. The old name is still supported for backward compatibility. **Bug fixes and stability improvements** - Fix condition for the replica start due to pg_rewind in paused state (Oleksii Kliukin) Avoid starting the replica that had already executed pg_rewind before. - Respond 200 to the master health-check only if update_lock has been successful (Alexander Kukushkin) Prevent Patroni from reporting itself a master on the former (demoted) master if DCS is partitioned. - Fix compatibility with the new consul module (Alexander Kukushkin) Starting from v1.1.0 python-consul changed internal API and started using `list` instead of `dict` to pass query parameters. - Catch exceptions from Patroni REST API thread during shutdown (Alexander Kukushkin) Those uncaught exceptions kept PostgreSQL running at shutdown. - Do crash recovery only when Postgres runs as the master (Alexander Kukushkin) Require `pg_controldata` to report 'in production' or 'shutting down' or 'in crash recovery'. In all other cases no crash recovery is necessary. 
- Improve handling of configuration errors (Henning Jacobs, Alexander Kukushkin) It is possible to change a lot of parameters in runtime (including `restapi.listen`) by updating Patroni config file and sending SIGHUP to Patroni process. This fix eliminates obscure exceptions from the 'restapi' thread when some of the parameters receive invalid values. Version 1.4.4 ------------- **Stability improvements** - Fix race condition in poll_failover_result (Alexander Kukushkin) It didn't affect directly neither failover nor switchover, but in some rare cases it was reporting success too early, when the former leader released the lock, producing a 'Failed over to "None"' instead of 'Failed over to "desired-node"' message. - Treat Postgres parameter names as case insensitive (Alexander Kukushkin) Most of the Postgres parameters have snake_case names, but there are three exceptions from this rule: DateStyle, IntervalStyle and TimeZone. Postgres accepts those parameters when written in a different case (e.g. timezone = 'some/tzn'); however, Patroni was unable to find case-insensitive matches of those parameter names in pg_settings and ignored such parameters as a result. - Abort start if attaching to running postgres and cluster not initialized (Alexander Kukushkin) Patroni can attach itself to an already running Postgres instance. It is imperative to start running Patroni on the master node before getting to the replicas. - Fix behavior of patronictl scaffold (Alexander Kukushkin) Pass dict object to touch_member instead of json encoded string, DCS implementation will take care of encoding it. - Don't demote master if failed to update leader key in pause (Alexander Kukushkin) During maintenance a DCS may start failing write requests while continuing to responds to read ones. In that case, Patroni used to put the Postgres master node to a read-only mode after failing to update the leader lock in DCS. 
- Sync replication slots when Patroni notices a new postmaster process (Alexander Kukushkin) If Postgres has been restarted, Patroni has to make sure that list of replication slots matches its expectations. - Verify sysid and sync replication slots after coming out of pause (Alexander Kukushkin) During the `maintenance` mode it may happen that data directory was completely rewritten and therefore we have to make sure that `Database system identifier` still belongs to our cluster and replication slots are in sync with Patroni expectations. - Fix a possible failure to start not running Postgres on a data directory with postmaster lock file present (Alexander Kukushkin) Detect reuse of PID from the postmaster lock file. More likely to hit such problem if you run Patroni and Postgres in the docker container. - Improve protection of DCS being accidentally wiped (Alexander Kukushkin) Patroni has a lot of logic in place to prevent failover in such case; it can also restore all keys back; however, until this change an accidental removal of /config key was switching off pause mode for 1 cycle of HA loop. - Do not exit when encountering invalid system ID (Oleksii Kliukin) Do not exit when the cluster system ID is empty or the one that doesn't pass the validation check. In that case, the cluster most likely needs a reinit; mention it in the result message. Avoid terminating Patroni, as otherwise reinit cannot happen. **Compatibility with Kubernetes 1.10+** - Added check for empty subsets (Cody Coons) Kubernetes 1.10.0+ started returning `Endpoints.subsets` set to `None` instead of `[]`. **Bootstrap improvements** - Make deleting recovery.conf optional (Brad Nicholson) If `bootstrap..keep_existing_recovery_conf` is defined and set to ``True``, Patroni will not remove the existing ``recovery.conf`` file. This is useful when bootstrapping from a backup with tools like pgBackRest that generate the appropriate `recovery.conf` for you. 
- Allow options to the basebackup built-in method (Oleksii Kliukin) It is now possible to supply options to the built-in basebackup method by defining the `basebackup` section in the configuration, similar to how those are defined for custom replica creation methods. The difference is in the format accepted by the `basebackup` section: since pg_basebackup accepts both `--key=value` and `--key` options, the contents of the section could be either a dictionary of key-value pairs, or a list of either one-element dictionaries or just keys (for the options that don't accept values). See :ref:`replica creation method ` section for additional examples. Version 1.4.3 ------------- **Improvements in logging** - Make log level configurable from environment variables (Andy Newton, Keyvan Hedayati) `PATRONI_LOGLEVEL` - sets the general logging level `PATRONI_REQUESTS_LOGLEVEL` - sets the logging level for all HTTP requests e.g. Kubernetes API calls See `the docs for Python logging ` to get the names of possible log levels **Stability improvements and bug fixes** - Don't rediscover etcd cluster topology when watch timed out (Alexander Kukushkin) If we have only one host in etcd configuration and exactly this host is not accessible, Patroni was starting discovery of cluster topology and never succeeding. Instead it should just switch to the next available node. - Write content of bootstrap.pg_hba into a pg_hba.conf after custom bootstrap (Alexander Kukushkin) Now it behaves similarly to the usual bootstrap with `initdb` - Single user mode was waiting for user input and never finish (Alexander Kukushkin) Regression was introduced in https://github.com/zalando/patroni/pull/576 Version 1.4.2 ------------- **Improvements in patronictl** - Rename scheduled failover to scheduled switchover (Alexander Kukushkin) Failover and switchover functions were separated in version 1.4, but `patronictl list` was still reporting `Scheduled failover` instead of `Scheduled switchover`. 
- Show information about pending restarts (Alexander Kukushkin) In order to apply some configuration changes sometimes it is necessary to restart postgres. Patroni was already giving a hint about that in the REST API and when writing node status into DCS, but there were no easy way to display it. - Make show-config to work with cluster_name from config file (Alexander Kukushkin) It works similar to the `patronictl edit-config` **Stability improvements** - Avoid calling pg_controldata during bootstrap (Alexander Kukushkin) During initdb or custom bootstrap there is a time window when pgdata is not empty but pg_controldata has not been written yet. In such case pg_controldata call was failing with error messages. - Handle exceptions raised from psutil (Alexander Kukushkin) cmdline is read and parsed every time when `cmdline()` method is called. It could happen that the process being examined has already disappeared, in that case `NoSuchProcess` is raised. **Kubernetes support improvements** - Don't swallow errors from k8s API (Alexander Kukushkin) A call to Kubernetes API could fail for a different number of reasons. In some cases such call should be retried, in some other cases we should log the error message and the exception stack trace. The change here will help debug Kubernetes permission issues. - Update Kubernetes example Dockerfile to install Patroni from the master branch (Maciej Szulik) Before that it was using `feature/k8s`, which became outdated. - Add proper RBAC to run patroni on k8s (Maciej Szulik) Add the Service account that is assigned to the pods of the cluster, the role that holds only the necessary permissions, and the rolebinding that connects the Service account and the Role. Version 1.4.1 ------------- **Fixes in patronictl** - Don't show current leader in suggested list of members to failover to. 
(Alexander Kukushkin) patronictl failover could still work when there is a leader in the cluster and it should be excluded from the list of members where it is possible to failover to. - Make patronictl switchover compatible with the old Patroni api (Alexander Kukushkin) In case if POST /switchover REST API call has failed with status code 501 it will do it once again, but to /failover endpoint. Version 1.4 ----------- This version adds support for using Kubernetes as a DCS, allowing to run Patroni as a cloud-native agent in Kubernetes without any additional deployments of Etcd, Zookeeper or Consul. **Upgrade notice** Installing Patroni via pip will no longer bring in dependencies for optional components (such as libraries for Etcd, Zookeeper, Consul or Kubernetes, or support for AWS). In order to enable them one needs to list them in pip install command explicitly, for instance `pip install patroni[etcd,kubernetes]`. **Kubernetes support** Implement Kubernetes-based DCS. The endpoints meta-data is used in order to store the configuration and the leader key. The meta-data field inside the pods definition is used to store the member-related data. In addition to using Endpoints, Patroni supports ConfigMaps. You can find more information about this feature in the :ref:`Kubernetes chapter of the documentation ` **Stability improvements** - Factor out postmaster process into a separate object (Ants Aasma) This object identifies a running postmaster process via pid and start time and simplifies detection (and resolution) of situations when the postmaster was restarted behind our back or when postgres directory disappeared from the file system. - Minimize the amount of SELECT's issued by Patroni on every loop of HA cycle (Alexander Kukushkin) On every iteration of HA loop Patroni needs to know recovery status and absolute wal position. From now on Patroni will run only single SELECT to get this information instead of two on the replica and three on the master. 
- Remove leader key on shutdown only when we have the lock (Ants Aasma) Unconditional removal was generating unnecessary and misleading exceptions. **Improvements in patronictl** - Add version command to patronictl (Ants Aasma) It will show the version of installed Patroni and versions of running Patroni instances (if the cluster name is specified). - Make optional specifying cluster_name argument for some of patronictl commands (Alexander Kukushkin, Ants Aasma) It will work if patronictl is using usual Patroni configuration file with the ``scope`` defined. - Show information about scheduled switchover and maintenance mode (Alexander Kukushkin) Before that it was possible to get this information only from Patroni logs or directly from DCS. - Improve ``patronictl reinit`` (Alexander Kukushkin) Sometimes ``patronictl reinit`` refused to proceed when Patroni was busy with other actions, namely trying to start postgres. `patronictl` didn't provide any commands to cancel such long running actions and the only (dangerous) workaround was removing a data directory manually. The new implementation of `reinit` forcefully cancels other long-running actions before proceeding with reinit. - Implement ``--wait`` flag in ``patronictl pause`` and ``patronictl resume`` (Alexander Kukushkin) It will make ``patronictl`` wait until the requested action is acknowledged by all nodes in the cluster. Such behaviour is achieved by exposing the ``pause`` flag for every node in DCS and via the REST API. - Rename ``patronictl failover`` into ``patronictl switchover`` (Alexander Kukushkin) The previous ``failover`` was actually only capable of doing a switchover; it refused to proceed in a cluster without the leader. - Alter the behavior of ``patronictl failover`` (Alexander Kukushkin) It will work even if there is no leader, but in that case you will have to explicitly specify a node which should become the new leader. 
**Expose information about timeline and history** - Expose current timeline in DCS and via API (Alexander Kukushkin) Store information about the current timeline for each member of the cluster. This information is accessible via the API and is stored in the DCS - Store promotion history in the /history key in DCS (Alexander Kukushkin) In addition, store the timeline history enriched with the timestamp of the corresponding promotion in the /history key in DCS and update it with each promote. **Add endpoints for getting synchronous and asynchronous replicas** - Add new /sync and /async endpoints (Alexander Kukushkin, Oleksii Kliukin) Those endpoints (also accessible as /synchronous and /asynchronous) return 200 only for synchronous and asynchronous replicas correspondingly (excluding those marked as `noloadbalance`). **Allow multiple hosts for Etcd** - Add a new `hosts` parameter to Etcd configuration (Alexander Kukushkin) This parameter should contain the initial list of hosts that will be used to discover and populate the list of the running etcd cluster members. If for some reason during work this list of discovered hosts is exhausted (no available hosts from that list), Patroni will return to the initial list from the `hosts` parameter. Version 1.3.6 ------------- **Stability improvements** - Verify process start time when checking if postgres is running. (Ants Aasma) After a crash that doesn't clean up postmaster.pid there could be a new process with the same pid, resulting in a false positive for is_running(), which will lead to all kinds of bad behavior. - Shutdown postgresql before bootstrap when we lost data directory (ainlolcat) When data directory on the master is forcefully removed, postgres process can still stay alive for some time and prevent the replica created in place of that former master from starting or replicating. 
The fix makes Patroni cache the postmaster pid and its start time and let it terminate the old postmaster in case it is still running after the corresponding data directory has been removed. - Perform crash recovery in a single user mode if postgres master dies (Alexander Kukushkin) It is unsafe to start immediately as a standby and not possible to run ``pg_rewind`` if postgres hasn't been shut down cleanly. The single user crash recovery only kicks in if ``pg_rewind`` is enabled or there is no master at the moment. **Consul improvements** - Make it possible to provide datacenter configuration for Consul (Vilius Okockis, Alexander Kukushkin) Before that Patroni was always communicating with datacenter of the host it runs on. - Always send a token in X-Consul-Token http header (Alexander Kukushkin) If ``consul.token`` is defined in Patroni configuration, we will always send it in the 'X-Consul-Token' http header. python-consul module tries to be "consistent" with Consul REST API, which doesn't accept token as a query parameter for `session API `__, but it still works with 'X-Consul-Token' header. - Adjust session TTL if supplied value is smaller than the minimum possible (Stas Fomin, Alexander Kukushkin) It could happen that the TTL provided in the Patroni configuration is smaller than the minimum one supported by Consul. In that case, Consul agent fails to create a new session. Without a session Patroni cannot create member and leader keys in the Consul KV store, resulting in an unhealthy cluster. **Other improvements** - Define custom log format via environment variable ``PATRONI_LOGFORMAT`` (Stas Fomin) Allow disabling timestamps and other similar fields in Patroni logs if they are already added by the system logger (usually when Patroni runs as a service). Version 1.3.5 ------------- **Bugfix** - Set role to 'uninitialized' if data directory was removed (Alexander Kukushkin) If the node was running as a master it was preventing from failover. 
**Stability improvement** - Try to run postmaster in a single-user mode if we tried and failed to start postgres (Alexander Kukushkin) Usually such problem happens when node running as a master was terminated and timelines were diverged. If ``recovery.conf`` has ``restore_command`` defined, there are really high chances that postgres will abort startup and leave controldata unchanged. It makes it impossible to use ``pg_rewind``, which requires a clean shutdown. **Consul improvements** - Make it possible to specify health checks when creating session (Alexander Kukushkin) If not specified, Consul will use "serfHealth". From one side it allows fast detection of isolated master, but from another side it makes it impossible for Patroni to tolerate short network lags. **Bugfix** - Fix watchdog on Python 3 (Ants Aasma) A misunderstanding of the ioctl() call interface. If mutable=False then fcntl.ioctl() actually returns the arg buffer back. This accidentally worked on Python2 because int and str comparison did not return an error. Error reporting is actually done by raising IOError on Python2 and OSError on Python3. Version 1.3.4 ------------- **Different Consul improvements** - Pass the consul token as a header (Andrew Colin Kissa) Headers are now the preferred way to pass the token to the consul `API `__. - Advanced configuration for Consul (Alexander Kukushkin) possibility to specify ``scheme``, ``token``, client and ca certificates :ref:`details `. - compatibility with python-consul-0.7.1 and above (Alexander Kukushkin) new python-consul module has changed signature of some methods - "Could not take out TTL lock" message was never logged (Alexander Kukushkin) Not a critical bug, but lack of proper logging complicates investigation in case of problems. 
**Quote synchronous_standby_names using quote_ident** - When writing ``synchronous_standby_names`` into the ``postgresql.conf`` its value must be quoted (Alexander Kukushkin) If it is not quoted properly, PostgreSQL will effectively disable synchronous replication and continue to work. **Different bugfixes around pause state, mostly related to watchdog** (Alexander Kukushkin) - Do not send keepalives if watchdog is not active - Avoid activating watchdog in a pause mode - Set correct postgres state in pause mode - Do not try to run queries from API if postgres is stopped Version 1.3.3 ------------- **Bugfixes** - synchronous replication was disabled shortly after promotion even when synchronous_mode_strict was turned on (Alexander Kukushkin) - create empty ``pg_ident.conf`` file if it is missing after restoring from the backup (Alexander Kukushkin) - open access in ``pg_hba.conf`` to all databases, not only postgres (Franco Bellagamba) Version 1.3.2 ------------- **Bugfix** - patronictl edit-config didn't work with ZooKeeper (Alexander Kukushkin) Version 1.3.1 ------------- **Bugfix** - failover via API was broken due to change in ``_MemberStatus`` (Alexander Kukushkin) Version 1.3 ----------- Version 1.3 adds custom bootstrap possibility, significantly improves support for pg_rewind, enhances the synchronous mode support, adds configuration editing to patronictl and implements watchdog support on Linux. In addition, this is the first version to work correctly with PostgreSQL 10. **Upgrade notice** There are no known compatibility issues with the new version of Patroni. Configuration from version 1.2 should work without any changes. It is possible to upgrade by installing new packages and either restarting Patroni (will cause PostgreSQL restart), or by putting Patroni into a :ref:`pause mode ` first and then restarting Patroni on all nodes in the cluster (Patroni in a pause mode will not attempt to stop/start PostgreSQL), resuming from the pause mode at the end. 
**Custom bootstrap** - Make the process of bootstrapping the cluster configurable (Alexander Kukushkin) Allow custom bootstrap scripts instead of ``initdb`` when initializing the very first node in the cluster. The bootstrap command receives the name of the cluster and the path to the data directory. The resulting cluster can be configured to perform recovery, making it possible to bootstrap from a backup and do point in time recovery. Refer to the :ref:`documentation page ` for a more detailed description of this feature. **Smarter pg_rewind support** - Decide on whether to run pg_rewind by looking at the timeline differences from the current master (Alexander Kukushkin) Previously, Patroni had a fixed set of conditions to trigger pg_rewind, namely when starting a former master, when doing a switchover to the designated node for every other node in the cluster or when there is a replica with the nofailover tag. All those cases have in common a chance that some replica may be ahead of the new master. In some cases, pg_rewind did nothing, in some other ones it was not running when necessary. Instead of relying on this limited list of rules make Patroni compare the master and the replica WAL positions (using the streaming replication protocol) in order to reliably decide if rewind is necessary for the replica. **Synchronous replication mode strict** - Enhance synchronous replication support by adding the strict mode (James Sewell, Alexander Kukushkin) Normally, when ``synchronous_mode`` is enabled and there are no replicas attached to the master, Patroni will disable synchronous replication in order to keep the master available for writes. The ``synchronous_mode_strict`` option changes that, when it is set Patroni will not disable the synchronous replication in a lack of replicas, effectively blocking all clients writing data to the master. 
In addition to the synchronous mode guarantee of preventing any data loss due to automatic failover, the strict mode ensures that each write is either durably stored on two nodes or not happening altogether if there is only one node in the cluster. **Configuration editing with patronictl** - Add configuration editing to patronictl (Ants Aasma, Alexander Kukushkin) Add the ability to patronictl of editing dynamic cluster configuration stored in DCS. Support either specifying the parameter/values from the command-line, invoking the $EDITOR, or applying configuration from the yaml file. **Linux watchdog support** - Implement watchdog support for Linux (Ants Aasma) Support Linux software watchdog in order to reboot the node where Patroni is not running or not responding (e.g. because of the high load) The Linux software watchdog reboots the non-responsive node. It is possible to configure the watchdog device to use (`/dev/watchdog` by default) and the mode (on, automatic, off) from the watchdog section of the Patroni configuration. You can get more information from the :ref:`watchdog documentation `. **Add support for PostgreSQL 10** - Patroni is compatible with all beta versions of PostgreSQL 10 released so far and we expect it to be compatible with PostgreSQL 10 when it is released. **PostgreSQL-related minor improvements** - Define pg_hba.conf via the Patroni configuration file or the dynamic configuration in DCS (Alexander Kukushkin) Allow to define the contents of ``pg_hba.conf`` in the ``pg_hba`` sub-section of the ``postgresql`` section of the configuration. This simplifies managing ``pg_hba.conf`` on multiple nodes, as one needs to define it only once in DCS instead of logging in to every node, changing it manually and reloading the configuration. When defined, the contents of this section will replace the current ``pg_hba.conf`` completely. Patroni ignores it if ``hba_file`` PostgreSQL parameter is set. 
- Support connecting via a UNIX socket to the local PostgreSQL cluster (Alexander Kukushkin) Add the ``use_unix_socket`` option to the ``postgresql`` section of Patroni configuration. When set to true and the PostgreSQL ``unix_socket_directories`` option is not empty, enables Patroni to use the first value from it to connect to the local PostgreSQL cluster. If ``unix_socket_directories`` is not defined, Patroni will assume its default value and omit the ``host`` parameter in the PostgreSQL connection string altogether. - Support change of superuser and replication credentials on reload (Alexander Kukushkin) - Support storing of configuration files outside of PostgreSQL data directory (@jouir) Add the new configuration ``postgresql`` configuration directive ``config_dir``. It defaults to the data directory and must be writable by Patroni. **Bug fixes and stability improvements** - Handle EtcdEventIndexCleared and EtcdWatcherCleared exceptions (Alexander Kukushkin) Faster recovery when the watch operation is ended by Etcd by avoiding useless retries. - Remove error spinning on Etcd failure and reduce log spam (Ants Aasma) Avoid immediate retrying and emitting stack traces in the log on the second and subsequent Etcd connection failures. - Export locale variables when forking PostgreSQL processes (Oleksii Kliukin) Avoid the `postmaster became multithreaded during startup` fatal error on non-English locales for PostgreSQL built with NLS. - Extra checks when dropping the replication slot (Alexander Kukushkin) In some cases Patroni is prevented from dropping the replication slot by the WAL sender. 
- Truncate the replication slot name to 63 (NAMEDATALEN - 1) characters to comply with PostgreSQL naming rules (Nick Scott) - Fix a race condition resulting in extra connections being opened to the PostgreSQL cluster from Patroni (Alexander Kukushkin) - Release the leader key when the node restarts with an empty data directory (Alex Kerney) - Set asynchronous executor busy when running bootstrap without a leader (Alexander Kukushkin) Failure to do so could have resulted in errors stating the node belonged to a different cluster, as Patroni proceeded with the normal business while being bootstrapped by a bootstrap method that doesn't require a leader to be present in the cluster. - Improve WAL-E replica creation method (Joar Wandborg, Alexander Kukushkin). - Use csv.DictReader when parsing WAL-E base backup, accepting ISO dates with space-delimited date and time. - Support fetching current WAL position from the replica to estimate the amount of WAL to restore. Previously, the code used to call system information functions that were available only on the master node. Version 1.2 ----------- This version introduces significant improvements over the handling of synchronous replication, makes the startup process and failover more reliable, adds PostgreSQL 9.6 support and fixes plenty of bugs. In addition, the documentation, including these release notes, has been moved to https://patroni.readthedocs.io. **Synchronous replication** - Add synchronous replication support. (Ants Aasma) Adds a new configuration variable ``synchronous_mode``. When enabled, Patroni will manage ``synchronous_standby_names`` to enable synchronous replication whenever there are healthy standbys available. When synchronous mode is enabled, Patroni will automatically fail over only to a standby that was synchronously replicating at the time of the master failure. This effectively means that no user visible transaction gets lost in such a case. 
See the :ref:`feature documentation ` for the detailed description and implementation details. **Reliability improvements** - Do not try to update the leader position stored in the ``leader optime`` key when PostgreSQL is not 100% healthy. Demote immediately when the update of the leader key failed. (Alexander Kukushkin) - Exclude unhealthy nodes from the list of targets to clone the new replica from. (Alexander Kukushkin) - Implement retry and timeout strategy for Consul similar to how it is done for Etcd. (Alexander Kukushkin) - Make ``--dcs`` and ``--config-file`` apply to all options in ``patronictl``. (Alexander Kukushkin) - Write all postgres parameters into postgresql.conf. (Alexander Kukushkin) It allows starting PostgreSQL configured by Patroni with just ``pg_ctl``. - Avoid exceptions when there are no users in the config. (Kirill Pushkin) - Allow pausing an unhealthy cluster. Before this fix, ``patronictl`` would bail out if the node it tries to execute pause on is unhealthy. (Alexander Kukushkin) - Improve the leader watch functionality. (Alexander Kukushkin) Previously the replicas were always watching the leader key (sleeping until the timeout or the leader key changes). With this change, they only watch when the replica's PostgreSQL is in the ``running`` state and not when it is stopped/starting or restarting PostgreSQL. - Avoid running into race conditions when handling SIGCHILD as a PID 1. (Alexander Kukushkin) Previously a race condition could occur when running inside the Docker containers, since the same process inside Patroni both spawned new processes and handled SIGCHILD from them. This change uses fork/execs for Patroni and leaves the original PID 1 process responsible for handling signals from children. - Fix WAL-E restore. (Oleksii Kliukin) Previously WAL-E restore used the ``no_master`` flag to avoid consulting with the master altogether, making Patroni always choose restoring from WAL over the ``pg_basebackup``. 
This change reverts it to the original meaning of ``no_master``, namely Patroni WAL-E restore may be selected as a replication method if the master is not running. The latter is checked by examining the connection string passed to the method. In addition, it makes the retry mechanism more robust and handles other minutia. - Implement asynchronous DNS resolver cache. (Alexander Kukushkin) Avoid failing when DNS is temporary unavailable (for instance, due to an excessive traffic received by the node). - Implement starting state and master start timeout. (Ants Aasma, Alexander Kukushkin) Previously ``pg_ctl`` waited for a timeout and then happily trodded on considering PostgreSQL to be running. This caused PostgreSQL to show up in listings as running when it was actually not and caused a race condition that resulted in either a failover, or a crash recovery, or a crash recovery interrupted by failover and a missed rewind. This change adds a ``master_start_timeout`` parameter and introduces a new state for the main HA loop: ``starting``. When ``master_start_timeout`` is 0 we will failover immediately when the master crashes as soon as there is a failover candidate. Otherwise, Patroni will wait after attempting to start PostgreSQL on the master for the duration of the timeout; when it expires, it will failover if possible. Manual failover requests will be honored during the crash of the master even before the timeout expiration. Introduce the ``timeout`` parameter to the ``restart`` API endpoint and ``patronictl``. When it is set and restart takes longer than the timeout, PostgreSQL is considered unhealthy and the other nodes becomes eligible to take the leader lock. - Fix ``pg_rewind`` behavior in a pause mode. (Ants Aasma) Avoid unnecessary restart in a pause mode when Patroni thinks it needs to rewind but rewind is not possible (i.e. ``pg_rewind`` is not present). 
Fallback to default ``libpq`` values for the ``superuser`` (default OS user) if ``superuser`` authentication is missing from the ``pg_rewind`` related Patroni configuration section. - Serialize callback execution. Kill the previous callback of the same type when the new one is about to run. Fix the issue of spawning zombie processes when running callbacks. (Alexander Kukushkin) - Avoid promoting a former master when the leader key is set in DCS but update to this leader key fails. (Alexander Kukushkin) This avoids the issue of a current master continuing to keep its role when it is partitioned together with the minority of nodes in Etcd and other DCSs that allow "inconsistent reads". **Miscellaneous** - Add ``post_init`` configuration option on bootstrap. (Alejandro Martínez) Patroni will call the script argument of this option right after running ``initdb`` and starting up PostgreSQL for a new cluster. The script receives a connection URL with ``superuser`` and sets ``PGPASSFILE`` to point to the ``.pgpass`` file containing the password. If the script fails, Patroni initialization fails as well. It is useful for adding new users or creating extensions in the new cluster. - Implement PostgreSQL 9.6 support. (Alexander Kukushkin) Use ``wal_level = replica`` as a synonym for ``hot_standby``, avoiding pending_restart flag when it changes from one to another. (Alexander Kukushkin) **Documentation improvements** - Add a Patroni main `loop workflow diagram `__. (Alejandro Martínez, Alexander Kukushkin) - Improve README, adding the Helm chart and links to release notes. (Lauri Apple) - Move Patroni documentation to ``Read the Docs``. The up-to-date documentation is available at https://patroni.readthedocs.io. (Oleksii Kliukin) Makes the documentation easily viewable from different devices (including smartphones) and searchable. - Move the package to the semantic versioning. 
(Oleksii Kliukin) Patroni will follow the major.minor.patch version schema to avoid releasing the new minor version on small but critical bugfixes. We will only publish the release notes for the minor version, which will include all patches. Version 1.1 ----------- This release improves management of a Patroni cluster by bringing in pause mode, improves maintenance with scheduled and conditional restarts, makes Patroni interaction with Etcd or Zookeeper more resilient and greatly enhances patronictl. **Upgrade notice** When upgrading from releases below 1.0 read about changing of credentials and configuration format in the 1.0 release notes. **Pause mode** - Introduce pause mode to temporarily detach Patroni from managing the PostgreSQL instance (Murat Kabilov, Alexander Kukushkin, Oleksii Kliukin). Previously, one had to send SIGKILL signal to Patroni to stop it without terminating PostgreSQL. The new pause mode detaches Patroni from PostgreSQL cluster-wide without terminating Patroni. It is similar to the maintenance mode in Pacemaker. Patroni is still responsible for updating member and leader keys in DCS, but it will not start, stop or restart PostgreSQL server in the process. There are a few exceptions, for instance, manual failovers, reinitializes and restarts are still allowed. You can read :ref:`a detailed description of this feature `. In addition, patronictl supports new ``pause`` and ``resume`` commands to toggle the pause mode. **Scheduled and conditional restarts** - Add conditions to the restart API command (Oleksii Kliukin) This change enhances Patroni restarts by adding a couple of conditions that can be verified in order to do the restart. Among the conditions are restarting when PostgreSQL role is either a master or a replica, checking the PostgreSQL version number or restarting only when restart is necessary in order to apply configuration changes. - Add scheduled restarts (Oleksii Kliukin) It is now possible to schedule a restart in the future. 
Only one scheduled restart per node is supported. It is possible to clear the scheduled restart if it is not needed anymore. A combination of scheduled and conditional restarts is supported, making it possible, for instance, to schedule minor PostgreSQL upgrades at night, restarting only the instances that are running the outdated minor version without adding postgres-specific logic to administration scripts. - Add support for conditional and scheduled restarts to patronictl (Murat Kabilov). patronictl restart supports several new options. There is also patronictl flush command to clean the scheduled actions. **Robust DCS interaction** - Set Kazoo timeouts depending on the loop_wait (Alexander Kukushkin) Originally, ping_timeout and connect_timeout values were calculated from the negotiated session timeout. Patroni loop_wait was not taken into account. As a result, a single retry could take more time than the session timeout, forcing Patroni to release the lock and demote. This change sets ping and connect timeouts to half of the value of loop_wait, speeding up detection of connection issues and leaving enough time to retry the connection attempt before losing the lock. - Update Etcd topology only after the original request succeeds (Alexander Kukushkin) Postpone updating the Etcd topology known to the client until after the original request. When retrieving the cluster topology, implement the retry timeouts depending on the known number of nodes in the Etcd cluster. This makes our client prefer to get the results of the request to having the up-to-date list of nodes. Both changes make Patroni connections to DCS more robust in the face of network issues. **Patronictl, monitoring and configuration** - Return information about streaming replicas via the API (Feike Steenbergen) Previously, there was no reliable way to query Patroni about PostgreSQL instances that fail to stream changes (for instance, due to connection issues). 
This change exposes the contents of pg_stat_replication via the /patroni endpoint. - Add patronictl scaffold command (Oleksii Kliukin) Add a command to create cluster structure in Etcd. The cluster is created with user-specified sysid and leader, and both leader and member keys are made persistent. This command is useful to create so-called master-less configurations, where a Patroni cluster consisting of only replicas replicates from the external master node that is unaware of Patroni. Subsequently, one may remove the leader key, promoting one of the Patroni nodes and replacing the original master with the Patroni-based HA cluster. - Add configuration option ``bin_dir`` to locate PostgreSQL binaries (Ants Aasma) It is useful to be able to specify the location of PostgreSQL binaries explicitly when using Linux distros that support installing multiple PostgreSQL versions at the same time. - Allow configuration file path to be overridden using ``custom_conf`` option (Alejandro Martínez) Allows for custom configuration file paths, which will be unmanaged by Patroni, :ref:`details `. **Bug fixes and code improvements** - Make Patroni compatible with the new version schema in PostgreSQL 10 and above (Feike Steenbergen) Make sure that Patroni understands 2-digit version numbers when doing conditional restarts based on the PostgreSQL version. - Use pkgutil to find DCS modules (Alexander Kukushkin) Use the dedicated python module instead of traversing directories manually in order to find DCS modules. - Always call on_start callback when starting Patroni (Alexander Kukushkin) Previously, Patroni did not call any callbacks when attaching to the already running node with the correct role. Since callbacks are often used to route client connections that could result in the failure to register the running node in the connection routing scheme. With this fix, Patroni calls on_start callback even when attaching to the already running node. 
- Do not drop active replication slots (Murat Kabilov, Oleksii Kliukin) Avoid dropping active physical replication slots on master. PostgreSQL cannot drop such slots anyway. This change makes it possible to run non-Patroni managed replicas/consumers on the master. - Close Patroni connections during start of the PostgreSQL instance (Alexander Kukushkin) Forces Patroni to close all former connections when PostgreSQL node is started. Avoids the trap of reusing former connections if postmaster was killed with SIGKILL. - Replace invalid characters when constructing slot names from member names (Ants Aasma) Make sure that standby names that do not comply with the slot naming rules don't cause the slot creation and standby startup to fail. Replace the dashes in the slot names with underscores and all other characters not allowed in slot names with their unicode codepoints. Version 1.0 ----------- This release introduces the global dynamic configuration that allows dynamic changes of the PostgreSQL and Patroni configuration parameters for the entire HA cluster. It also delivers numerous bugfixes. **Upgrade notice** When upgrading from v0.90 or below, always upgrade all replicas before the master. Since we don't store replication credentials in DCS anymore, an old replica won't be able to connect to the new master. **Dynamic Configuration** - Implement the dynamic global configuration (Alexander Kukushkin) Introduce new REST API endpoint /config to provide PostgreSQL and Patroni configuration parameters that should be set globally for the entire HA cluster (master and all the replicas). Those parameters are set in DCS and in many cases can be applied without disrupting PostgreSQL or Patroni. Patroni sets a special flag called "pending restart" visible via the API when some of the values require the PostgreSQL restart. In that case, restart should be issued manually via the API. Patroni SIGHUP or POST to /reload will make it re-read the configuration file. 
See the :ref:`Patroni configuration ` for the details on which parameters can be changed and the order of processing different configuration sources. The configuration file format *has changed* since v0.90. Patroni is still compatible with the old configuration files, but in order to take advantage of the bootstrap parameters one needs to change it. Users are encouraged to update them by referring to the :ref:`dynamic configuration documentation page `. **More flexible configuration** - Make postgresql configuration and database name Patroni connects to configurable (Misja Hoebe) Introduce `database` and `config_base_name` configuration parameters. Among others, it makes it possible to run Patroni with PipelineDB and other PostgreSQL forks. - Implement possibility to configure some Patroni configuration parameters via environment (Alexander Kukushkin) Those include the scope, the node name and the namespace, as well as the secrets and makes it easier to run Patroni in a dynamic environment, i.e. Kubernetes. Please refer to the :ref:`supported environment variables ` for further details. - Update the built-in Patroni docker container to take advantage of environment-based configuration (Feike Steenbergen). - Add Zookeeper support to Patroni docker image (Alexander Kukushkin) - Split the Zookeeper and Exhibitor configuration options (Alexander Kukushkin) - Make patronictl reuse the code from Patroni to read configuration (Alexander Kukushkin) This allows patronictl to take advantage of environment-based configuration. - Set application name to node name in primary_conninfo (Alexander Kukushkin) This simplifies identification and configuration of synchronous replication for a given node. 
**Stability, security and usability improvements** - Reset sysid and do not call pg_controldata when restore of backup in progress (Alexander Kukushkin) This change reduces the amount of noise generated by Patroni API health checks during the lengthy initialization of this node from the backup. - Fix a bunch of pg_rewind corner-cases (Alexander Kukushkin) Avoid running pg_rewind if the source cluster is not the master. In addition, avoid removing the data directory on an unsuccessful rewind, unless the new parameter *remove_data_directory_on_rewind_failure* is set to true. By default it is false. - Remove passwords from the replication connection string in DCS (Alexander Kukushkin) Previously, Patroni always used the replication credentials from the Postgres URL in DCS. That is now changed to take the credentials from the patroni configuration. The secrets (replication username and password) are no longer exposed in DCS. - Fix the asynchronous machinery around the demote call (Alexander Kukushkin) Demote now runs totally asynchronously without blocking the DCS interactions. - Make patronictl always send the authorization header if it is configured (Alexander Kukushkin) This allows patronictl to issue "protected" requests, i.e. restart or reinitialize, when Patroni is configured to require authorization on those. - Handle the SystemExit exception correctly (Alexander Kukushkin) Avoids the issues of Patroni not stopping properly when receiving SIGTERM - Sample haproxy templates for confd (Alexander Kukushkin) Generates and dynamically changes haproxy configuration from the patroni state in the DCS using confd - Improve and restructure the documentation to make it more friendly to the new users (Lauri Apple) - API must report role=master during pg_ctl stop (Alexander Kukushkin) Makes the callback calls more reliable, particularly in the cluster stop case. 
In addition, introduce the `pg_ctl_timeout` option to set the timeout for the start, stop and restart calls via the `pg_ctl`. - Fix the retry logic in etcd (Alexander Kukushkin) Make retries more predictable and robust. - Make Zookeeper code more resilient against short network hiccups (Alexander Kukushkin) Reduce the connection timeouts to make Zookeeper connection attempts more frequent. Version 0.90 ------------ This release adds support for Consul, includes a new *noloadbalance* tag, changes the behavior of the *clonefrom* tag, improves *pg_rewind* handling and improves the *patronictl* control program. **Consul support** - Implement Consul support (Alexander Kukushkin) Patroni runs against Consul, in addition to Etcd and Zookeeper. The connection parameters can be configured in the YAML file. **New and improved tags** - Implement *noloadbalance* tag (Alexander Kukushkin) This tag makes Patroni always return that the replica is not available to the load balancer. - Change the implementation of the *clonefrom* tag (Alexander Kukushkin) Previously, a node name had to be supplied to the *clonefrom*, forcing a tagged replica to clone from the specific node. The new implementation makes *clonefrom* a boolean tag: if it is set to true, the replica becomes a candidate for other replicas to clone from it. When multiple candidates are present, the replica picks one randomly. **Stability and security improvements** - Numerous reliability improvements (Alexander Kukushkin) Removes some spurious error messages, improves the stability of the failover, addresses some corner cases with reading data from DCS, shutdown, demote and reattaching of the former leader. - Improve systemd script to avoid killing Patroni children on stop (Jan Keirse, Alexander Kukushkin) Previously, when stopping Patroni, *systemd* also sent a signal to PostgreSQL. 
Since Patroni also tried to stop PostgreSQL by itself, it resulted in sending two different shutdown requests (the smart shutdown, followed by the fast shutdown). That resulted in replicas disconnecting too early and a former master not being able to rejoin after demote. Fix by Jan with prior research by Alexander. - Eliminate some cases where the former master was unable to call pg_rewind before rejoining as a replica (Oleksii Kliukin) Previously, we only called *pg_rewind* if the former master had crashed. Change this to always run pg_rewind for the former master as long as pg_rewind is present in the system. This fixes the case when the master is shut down before the replicas managed to get the latest changes (i.e. during the "smart" shutdown). - Numerous improvements to unit- and acceptance- tests, in particular, enable support for Zookeeper and Consul (Alexander Kukushkin). - Make Travis CI faster and implement support for running tests against Zookeeper (Exhibitor) and Consul (Alexander Kukushkin) Both unit and acceptance tests run automatically against Etcd, Zookeeper and Consul on each commit or pull-request. - Clear environment variables before calling PostgreSQL commands from Patroni (Feike Steenbergen) This prevents the possibility of reading system environment variables by connecting to the PostgreSQL cluster managed by Patroni. **Configuration and control changes** - Unify patronictl and Patroni configuration (Feike Steenbergen) patronictl can use the same configuration file as Patroni itself. - Enable Patroni to read the configuration from the environment variables (Oleksii Kliukin) This simplifies generating configuration for Patroni automatically, or merging a single configuration from different sources. - Include database system identifier in the information returned by the API (Feike Steenbergen) - Implement *delete_cluster* for all available DCSs (Alexander Kukushkin) Enables support for DCSs other than Etcd in patronictl. 
Version 0.80 ------------ This release adds support for *cascading replication* and simplifies Patroni management by providing *scheduled failovers*. One may use older versions of Patroni (in particular, 0.78) combined with this one in order to migrate to the new release. Note that the scheduled failover and cascading replication related features will only work with Patroni 0.80 and above. **Cascading replication** - Add support for the *replicatefrom* and *clonefrom* tags for the patroni node (Oleksii Kliukin). The tag *replicatefrom* allows a replica to use an arbitrary node as a source, not necessarily the master. The *clonefrom* does the same for the initial backup. Together, they enable Patroni to fully support cascading replication. - Add support for running replication methods to initialize the replica even without a running replication connection (Oleksii Kliukin). This is useful in order to create replicas from the snapshots stored on S3 or FTP. A replication method that does not require a running replication connection should supply *no_master: true* in the yaml configuration. Those scripts will still be called in order if the replication connection is present. **Patronictl, API and DCS improvements** - Implement scheduled failovers (Feike Steenbergen). Failovers can be scheduled to happen at a certain time in the future, using either patronictl, or API calls. - Add support for *dbuser* and *password* parameters in patronictl (Feike Steenbergen). - Add PostgreSQL version to the health check output (Feike Steenbergen). - Improve Zookeeper support in patronictl (Oleksandr Shulgin) - Migrate to python-etcd 0.43 (Alexander Kukushkin) **Configuration** - Add a sample systemd configuration script for Patroni (Jan Keirse). - Fix the problem of Patroni ignoring the superuser name specified in the configuration file for DB connections (Alexander Kukushkin). 
- Fix the handling of CTRL-C by creating a separate session ID and process group for the postmaster launched by Patroni (Alexander Kukushkin). **Tests** - Add acceptance tests with *behave* in order to check real-world scenarios of running Patroni (Alexander Kukushkin, Oleksii Kliukin). The tests can be launched manually using the *behave* command. They are also launched automatically for pull requests and after commits. Release notes for some older versions can be found on `project's github page `__. patroni-3.2.2/docs/replica_bootstrap.rst000066400000000000000000000263501455170150700204030ustar00rootroot00000000000000Replica imaging and bootstrap ============================= Patroni allows customizing creation of a new replica. It also supports defining what happens when the new empty cluster is being bootstrapped. The distinction between two is well defined: Patroni creates replicas only if the ``initialize`` key is present in DCS for the cluster. If there is no ``initialize`` key - Patroni calls bootstrap exclusively on the first node that takes the initialize key lock. .. _custom_bootstrap: Bootstrap --------- PostgreSQL provides ``initdb`` command to initialize a new cluster and Patroni calls it by default. In certain cases, particularly when creating a new cluster as a copy of an existing one, it is necessary to replace a built-in method with custom actions. Patroni supports executing user-defined scripts to bootstrap new clusters, supplying some required arguments to them, i.e. the name of the cluster and the path to the data directory. This is configured in the ``bootstrap`` section of the Patroni configuration. For example: .. code:: YAML bootstrap: method: : command: [param1 [, ...]] keep_existing_recovery_conf: False no_params: False recovery_conf: recovery_target_action: promote recovery_target_timeline: latest restore_command: Each bootstrap method must define at least a ``name`` and a ``command``. 
A special ``initdb`` method is available to trigger the default behavior, in which case ``method`` parameter can be omitted altogether. The ``command`` can be specified using either an absolute path, or the one relative to the ``patroni`` command location. In addition to the fixed parameters defined in the configuration files, Patroni supplies two cluster-specific ones: --scope Name of the cluster to be bootstrapped --datadir Path to the data directory of the cluster instance to be bootstrapped Passing these two additional flags can be disabled by setting a special ``no_params`` parameter to ``True``. If the bootstrap script returns ``0``, Patroni tries to configure and start the PostgreSQL instance produced by it. If any of the intermediate steps fail, or the script returns a non-zero value, Patroni assumes that the bootstrap has failed, cleans up after itself and releases the initialize lock to give another node the opportunity to bootstrap. If a ``recovery_conf`` block is defined in the same section as the custom bootstrap method, Patroni will generate a ``recovery.conf`` before starting the newly bootstrapped instance (or set the recovery settings on Postgres configuration if running PostgreSQL >= 12). Typically, such recovery configuration should contain at least one of the ``recovery_target_*`` parameters, together with the ``recovery_target_timeline`` set to ``promote``. If ``keep_existing_recovery_conf`` is defined and set to ``True``, Patroni will not remove the existing ``recovery.conf`` file if it exists (PostgreSQL <= 11). Similarly, in that case Patroni will not remove the existing ``recovery.signal`` or ``standby.signal`` if either exists, nor will it override the configured recovery settings (PostgreSQL >= 12). This is useful when bootstrapping from a backup with tools like pgBackRest that generate the appropriate recovery configuration for you. 
Besides that, any additional key/value pairs informed in the custom bootstrap method configuration will be passed as arguments to ``command`` in the format ``--name=value``. For example: .. code:: YAML bootstrap: method: : command: arg1: value1 arg2: value2 Makes the configured ``command`` to be called additionally with ``--arg1=value1 --arg2=value2`` command-line arguments. .. note:: Bootstrap methods are neither chained, nor fallen-back to the default one in case the primary one fails .. _custom_replica_creation: Building replicas ----------------- Patroni uses tried and proven ``pg_basebackup`` in order to create new replicas. One downside of it is that it requires a running leader node. Another one is the lack of 'on-the-fly' compression for the backup data and no built-in cleanup for outdated backup files. Some people prefer other backup solutions, such as ``WAL-E``, ``pgBackRest``, ``Barman`` and others, or simply roll their own scripts. In order to accommodate all those use-cases Patroni supports running custom scripts to clone a new replica. Those are configured in the ``postgresql`` configuration block: .. code:: YAML postgresql: create_replica_methods: - : command: keep_data: True no_params: True no_leader: 1 example: wal_e .. code:: YAML postgresql: create_replica_methods: - wal_e - basebackup wal_e: command: patroni_wale_restore no_leader: 1 envdir: {{WALE_ENV_DIR}} use_iam: 1 basebackup: max-rate: '100M' example: pgbackrest .. code:: YAML postgresql: create_replica_methods: - pgbackrest - basebackup pgbackrest: command: /usr/bin/pgbackrest --stanza= --delta restore keep_data: True no_params: True basebackup: max-rate: '100M' The ``create_replica_methods`` defines available replica creation methods and the order of executing them. Patroni will stop on the first one that returns 0. Each method should define a separate section in the configuration file, listing the command to execute and any custom parameters that should be passed to that command. 
All parameters will be passed in a ``--name=value`` format. Besides user-defined parameters, Patroni supplies a couple of cluster-specific ones: --scope Which cluster this replica belongs to --datadir Path to the data directory of the replica --role Always 'replica' --connstring Connection string to connect to the cluster member to clone from (primary or other replica). The user in the connection string can execute SQL and replication protocol commands. A special ``no_leader`` parameter, if defined, allows Patroni to call the replica creation method even if there is no running leader or replicas. In that case, an empty string will be passed in a connection string. This is useful for restoring the formerly running cluster from the binary backup. A special ``keep_data`` parameter, if defined, will instruct Patroni to not clean PGDATA folder before calling restore. A special ``no_params`` parameter, if defined, restricts passing parameters to custom command. A ``basebackup`` method is a special case: it will be used if ``create_replica_methods`` is empty, although it is possible to list it explicitly among the ``create_replica_methods`` methods. This method initializes a new replica with the ``pg_basebackup``, the base backup is taken from the leader unless there are replicas with ``clonefrom`` tag, in which case one of such replicas will be used as the origin for pg_basebackup. It works without any configuration; however, it is possible to specify a ``basebackup`` configuration section. Same rules as with the other method configuration apply, namely, only long (with --) options should be specified there. Not all parameters make sense, if you override a connection string or provide an option to created tar-ed or compressed base backups, patroni won't be able to make a replica out of it. There is no validation performed on the names or values of the parameters passed to the ``basebackup`` section. 
Also note that in case symlinks are used for the WAL folder it is up to the user to specify the correct ``--waldir`` path as an option, so that after replica buildup or re-initialization the symlink would persist. This option is supported only since v10 though. You can specify basebackup parameters as either a map (key-value pairs) or a list of elements, where each element could be either a key-value pair or a single key (for options that does not receive any values, for instance, ``--verbose``). Consider those 2 examples: .. code:: YAML postgresql: basebackup: max-rate: '100M' checkpoint: 'fast' and .. code:: YAML postgresql: basebackup: - verbose - max-rate: '100M' - waldir: /pg-wal-mount/external-waldir If all replica creation methods fail, Patroni will try again all methods in order during the next event loop cycle. .. _standby_cluster: Standby cluster --------------- Another available option is to run a "standby cluster", that contains only of standby nodes replicating from some remote node. This type of clusters has: * "standby leader", that behaves pretty much like a regular cluster leader, except it replicates from a remote node. * cascade replicas, that are replicating from standby leader. Standby leader holds and updates a leader lock in DCS. If the leader lock expires, cascade replicas will perform an election to choose another leader from the standbys. There is no further relationship between the standby cluster and the primary cluster it replicates from, in particular, they must not share the same DCS scope if they use the same DCS. They do not know anything else from each other apart from replication information. Also, the standby cluster is not being displayed in :ref:`patronictl_list` or :ref:`patronictl_topology` output on the primary cluster. 
For the sake of flexibility, you can specify methods of creating a replica and recovery WAL records when a cluster is in the "standby mode" by providing `create_replica_methods` key in `standby_cluster` section. It is distinct from creating replicas, when cluster is detached and functions as a normal cluster, which is controlled by `create_replica_methods` in `postgresql` section. Both "standby" and "normal" `create_replica_methods` reference keys in `postgresql` section. To configure such cluster you need to specify the section ``standby_cluster`` in a patroni configuration: .. code:: YAML bootstrap: dcs: standby_cluster: host: 1.2.3.4 port: 5432 primary_slot_name: patroni create_replica_methods: - basebackup Note, that these options will be applied only once during cluster bootstrap, and the only way to change them afterwards is through DCS. Patroni expects to find `postgresql.conf` or `postgresql.conf.backup` in PGDATA of the remote primary and will not start if it does not find it after a basebackup. If the remote primary keeps its `postgresql.conf` elsewhere, it is your responsibility to copy it to PGDATA. If you use replication slots on the standby cluster, you must also create the corresponding replication slot on the primary cluster. It will not be done automatically by the standby cluster implementation. You can use Patroni's permanent replication slots feature on the primary cluster to maintain a replication slot with the same name as ``primary_slot_name``, or its default value if ``primary_slot_name`` is not provided. patroni-3.2.2/docs/replication_modes.rst000066400000000000000000000203671455170150700203710ustar00rootroot00000000000000.. _replication_modes: ================= Replication modes ================= Patroni uses PostgreSQL streaming replication. For more information about streaming replication, see the `Postgres documentation `__. By default Patroni configures PostgreSQL for asynchronous replication. 
Choosing your replication schema is dependent on your business considerations. Investigate both async and sync replication, as well as other HA solutions, to determine which solution is best for you. Asynchronous mode durability ---------------------------- In asynchronous mode the cluster is allowed to lose some committed transactions to ensure availability. When the primary server fails or becomes unavailable for any other reason Patroni will automatically promote a sufficiently healthy standby to primary. Any transactions that have not been replicated to that standby remain in a "forked timeline" on the primary, and are effectively unrecoverable [1]_. The amount of transactions that can be lost is controlled via ``maximum_lag_on_failover`` parameter. Because the primary transaction log position is not sampled in real time, in reality the amount of lost data on failover is worst case bounded by ``maximum_lag_on_failover`` bytes of transaction log plus the amount that is written in the last ``ttl`` seconds (``loop_wait``/2 seconds in the average case). However typical steady state replication delay is well under a second. By default, when running leader elections, Patroni does not take into account the current timeline of replicas, what in some cases could be undesirable behavior. You can prevent the node not having the same timeline as a former primary become the new leader by changing the value of ``check_timeline`` parameter to ``true``. PostgreSQL synchronous replication ---------------------------------- You can use Postgres's `synchronous replication `__ with Patroni. Synchronous replication ensures consistency across a cluster by confirming that writes are written to a secondary before returning to the connecting client with a success. The cost of synchronous replication: reduced throughput on writes. This throughput will be entirely based on network performance. 
In hosted datacenter environments (like AWS, Rackspace, or any network you do not control), synchronous replication significantly increases the variability of write performance. If followers become inaccessible from the leader, the leader effectively becomes read-only. To enable a simple synchronous replication test, add the following lines to the ``parameters`` section of your YAML configuration files: .. code:: YAML synchronous_commit: "on" synchronous_standby_names: "*" When using PostgreSQL synchronous replication, use at least three Postgres data nodes to ensure write availability if one host fails. Using PostgreSQL synchronous replication does not guarantee zero lost transactions under all circumstances. When the primary and the secondary that is currently acting as a synchronous replica fail simultaneously a third node that might not contain all transactions will be promoted. .. _synchronous_mode: Synchronous mode ---------------- For use cases where losing committed transactions is not permissible you can turn on Patroni's ``synchronous_mode``. When ``synchronous_mode`` is turned on Patroni will not promote a standby unless it is certain that the standby contains all transactions that may have returned a successful commit status to client [2]_. This means that the system may be unavailable for writes even though some servers are available. System administrators can still use manual failover commands to promote a standby even if it results in transaction loss. Turning on ``synchronous_mode`` does not guarantee multi node durability of commits under all circumstances. When no suitable standby is available, primary server will still accept writes, but does not guarantee their replication. When the primary fails in this mode no standby will be promoted. When the host that used to be the primary comes back it will get promoted automatically, unless system administrator performed a manual failover. This behavior makes synchronous mode usable with 2 node clusters. 
When ``synchronous_mode`` is on and a standby crashes, commits will block until next iteration of Patroni runs and switches the primary to standalone mode (worst case delay for writes ``ttl`` seconds, average case ``loop_wait``/2 seconds). Manually shutting down or restarting a standby will not cause a commit service interruption. Standby will signal the primary to release itself from synchronous standby duties before PostgreSQL shutdown is initiated. When it is absolutely necessary to guarantee that each write is stored durably on at least two nodes, enable ``synchronous_mode_strict`` in addition to the ``synchronous_mode``. This parameter prevents Patroni from switching off the synchronous replication on the primary when no synchronous standby candidates are available. As a downside, the primary is not be available for writes (unless the Postgres transaction explicitly turns off ``synchronous_mode``), blocking all client write requests until at least one synchronous replica comes up. You can ensure that a standby never becomes the synchronous standby by setting ``nosync`` tag to true. This is recommended to set for standbys that are behind slow network connections and would cause performance degradation when becoming a synchronous standby. Synchronous mode can be switched on and off via Patroni REST interface. See :ref:`dynamic configuration ` for instructions. Note: Because of the way synchronous replication is implemented in PostgreSQL it is still possible to lose transactions even when using ``synchronous_mode_strict``. If the PostgreSQL backend is cancelled while waiting to acknowledge replication (as a result of packet cancellation due to client timeout or backend failure) transaction changes become visible for other backends. Such changes are not yet replicated and may be lost in case of standby promotion. 
Synchronous Replication Factor ------------------------------ The parameter ``synchronous_node_count`` is used by Patroni to manage number of synchronous standby databases. It is set to 1 by default. It has no effect when ``synchronous_mode`` is set to off. When enabled, Patroni manages precise number of synchronous standby databases based on parameter ``synchronous_node_count`` and adjusts the state in DCS & synchronous_standby_names as members join and leave. Synchronous mode implementation ------------------------------- When in synchronous mode Patroni maintains synchronization state in the DCS, containing the latest primary and current synchronous standby databases. This state is updated with strict ordering constraints to ensure the following invariants: - A node must be marked as the latest leader whenever it can accept write transactions. Patroni crashing or PostgreSQL not shutting down can cause violations of this invariant. - A node must be set as the synchronous standby in PostgreSQL as long as it is published as the synchronous standby. - A node that is not the leader or current synchronous standby is not allowed to promote itself automatically. Patroni will only assign one or more synchronous standby nodes based on ``synchronous_node_count`` parameter to ``synchronous_standby_names``. On each HA loop iteration Patroni re-evaluates synchronous standby nodes choice. If the current list of synchronous standby nodes are connected and has not requested its synchronous status to be removed it remains picked. Otherwise the cluster member available for sync that is furthest ahead in replication is picked. .. [1] The data is still there, but recovering it requires a manual recovery effort by data recovery specialists. When Patroni is allowed to rewind with ``use_pg_rewind`` the forked timeline will be automatically erased to rejoin the failed primary with the cluster. .. 
[2] Clients can change the behavior per transaction using PostgreSQL's ``synchronous_commit`` setting. Transactions with ``synchronous_commit`` values of ``off`` and ``local`` may be lost on fail over, but will not be blocked by replication delays. patroni-3.2.2/docs/rest_api.rst000066400000000000000000000652131455170150700164760ustar00rootroot00000000000000.. _rest_api: Patroni REST API ================ Patroni has a rich REST API, which is used by Patroni itself during the leader race, by the :ref:`patronictl` tool in order to perform failovers/switchovers/reinitialize/restarts/reloads, by HAProxy or any other kind of load balancer to perform HTTP health checks, and of course could also be used for monitoring. Below you will find the list of Patroni REST API endpoints. Health check endpoints ---------------------- For all health check ``GET`` requests Patroni returns a JSON document with the status of the node, along with the HTTP status code. If you don't want or don't need the JSON document, you might consider using the ``HEAD`` or ``OPTIONS`` method instead of ``GET``. - The following requests to Patroni REST API will return HTTP status code **200** only when the Patroni node is running as the primary with leader lock: - ``GET /`` - ``GET /primary`` - ``GET /read-write`` - ``GET /standby-leader``: returns HTTP status code **200** only when the Patroni node is running as the leader in a :ref:`standby cluster `. - ``GET /leader``: returns HTTP status code **200** when the Patroni node has the leader lock. The major difference from the two previous endpoints is that it doesn't take into account whether PostgreSQL is running as the ``primary`` or the ``standby_leader``. - ``GET /replica``: replica health check endpoint. It returns HTTP status code **200** only when the Patroni node is in the state ``running``, the role is ``replica`` and ``noloadbalance`` tag is not set. - ``GET /replica?lag=``: replica check endpoint. 
In addition to checks from ``replica``, it also checks replication latency and returns status code **200** only when it is below specified value. The key cluster.last_leader_operation from DCS is used for Leader wal position and compute latency on replica for performance reasons. max-lag can be specified in bytes (integer) or in human readable values, for e.g. 16kB, 64MB, 1GB. - ``GET /replica?lag=1048576`` - ``GET /replica?lag=1024kB`` - ``GET /replica?lag=10MB`` - ``GET /replica?lag=1GB`` - ``GET /replica?tag_key1=value1&tag_key2=value2``: replica check endpoint. In addition, It will also check for user defined tags ``key1`` and ``key2`` and their respective values in the **tags** section of the yaml configuration management. If the tag isn't defined for an instance, or if the value in the yaml configuration doesn't match the querying value, it will return HTTP Status Code 503. In the following requests, since we are checking for the leader or standby-leader status, Patroni doesn't apply any of the user defined tags and they will be ignored. - ``GET /?tag_key1=value1&tag_key2=value2`` - ``GET /leader?tag_key1=value1&tag_key2=value2`` - ``GET /primary?tag_key1=value1&tag_key2=value2`` - ``GET /read-write?tag_key1=value1&tag_key2=value2`` - ``GET /standby_leader?tag_key1=value1&tag_key2=value2`` - ``GET /standby-leader?tag_key1=value1&tag_key2=value2`` - ``GET /read-only``: like the above endpoint, but also includes the primary. - ``GET /synchronous`` or ``GET /sync``: returns HTTP status code **200** only when the Patroni node is running as a synchronous standby. - ``GET /read-only-sync``: like the above endpoint, but also includes the primary. - ``GET /asynchronous`` or ``GET /async``: returns HTTP status code **200** only when the Patroni node is running as an asynchronous standby. - ``GET /asynchronous?lag=`` or ``GET /async?lag=``: asynchronous standby check endpoint. 
In addition to checks from ``asynchronous`` or ``async``, it also checks replication latency and returns status code **200** only when it is below specified value. The key cluster.last_leader_operation from DCS is used for Leader wal position and compute latency on replica for performance reasons. max-lag can be specified in bytes (integer) or in human readable values, for e.g. 16kB, 64MB, 1GB. - ``GET /async?lag=1048576`` - ``GET /async?lag=1024kB`` - ``GET /async?lag=10MB`` - ``GET /async?lag=1GB`` - ``GET /health``: returns HTTP status code **200** only when PostgreSQL is up and running. - ``GET /liveness``: returns HTTP status code **200** if Patroni heartbeat loop is properly running and **503** if the last run was more than ``ttl`` seconds ago on the primary or ``2*ttl`` on the replica. Could be used for ``livenessProbe``. - ``GET /readiness``: returns HTTP status code **200** when the Patroni node is running as the leader or when PostgreSQL is up and running. The endpoint could be used for ``readinessProbe`` when it is not possible to use Kubernetes endpoints for leader elections (OpenShift). Both, ``readiness`` and ``liveness`` endpoints are very light-weight and not executing any SQL. Probes should be configured in such a way that they start failing about time when the leader key is expiring. With the default value of ``ttl``, which is ``30s`` example probes would look like: .. code-block:: yaml readinessProbe: httpGet: scheme: HTTP path: /readiness port: 8008 initialDelaySeconds: 3 periodSeconds: 10 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 3 livenessProbe: httpGet: scheme: HTTP path: /liveness port: 8008 initialDelaySeconds: 3 periodSeconds: 10 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 3 Monitoring endpoint ------------------- The ``GET /patroni`` is used by Patroni during the leader race. It also could be used by your monitoring system. 
The JSON document produced by this endpoint has the same structure as the JSON produced by the health check endpoints. **Example:** A healthy cluster .. code-block:: bash $ curl -s http://localhost:8008/patroni | jq . { "state": "running", "postmaster_start_time": "2023-08-18 11:03:37.966359+00:00", "role": "master", "server_version": 150004, "xlog": { "location": 67395656 }, "timeline": 1, "replication": [ { "usename": "replicator", "application_name": "patroni2", "client_addr": "10.89.0.6", "state": "streaming", "sync_state": "async", "sync_priority": 0 }, { "usename": "replicator", "application_name": "patroni3", "client_addr": "10.89.0.2", "state": "streaming", "sync_state": "async", "sync_priority": 0 } ], "dcs_last_seen": 1692356718, "tags": { "clonefrom": true }, "database_system_identifier": "7268616322854375442", "patroni": { "version": "3.1.0", "scope": "demo", "name": "patroni1" } } **Example:** An unlocked cluster .. code-block:: bash $ curl -s http://localhost:8008/patroni | jq . { "state": "running", "postmaster_start_time": "2023-08-18 11:09:08.615242+00:00", "role": "replica", "server_version": 150004, "xlog": { "received_location": 67419744, "replayed_location": 67419744, "replayed_timestamp": null, "paused": false }, "timeline": 1, "replication": [ { "usename": "replicator", "application_name": "patroni2", "client_addr": "10.89.0.6", "state": "streaming", "sync_state": "async", "sync_priority": 0 }, { "usename": "replicator", "application_name": "patroni3", "client_addr": "10.89.0.2", "state": "streaming", "sync_state": "async", "sync_priority": 0 } ], "cluster_unlocked": true, "dcs_last_seen": 1692356928, "tags": { "clonefrom": true }, "database_system_identifier": "7268616322854375442", "patroni": { "version": "3.1.0", "scope": "demo", "name": "patroni1" } } **Example:** An unlocked cluster with :ref:`DCS failsafe mode ` enabled .. code-block:: bash $ curl -s http://localhost:8008/patroni | jq . 
{ "state": "running", "postmaster_start_time": "2023-08-18 11:09:08.615242+00:00", "role": "replica", "server_version": 150004, "xlog": { "location": 67420024 }, "timeline": 1, "replication": [ { "usename": "replicator", "application_name": "patroni2", "client_addr": "10.89.0.6", "state": "streaming", "sync_state": "async", "sync_priority": 0 }, { "usename": "replicator", "application_name": "patroni3", "client_addr": "10.89.0.2", "state": "streaming", "sync_state": "async", "sync_priority": 0 } ], "cluster_unlocked": true, "failsafe_mode_is_active": true, "dcs_last_seen": 1692356928, "tags": { "clonefrom": true }, "database_system_identifier": "7268616322854375442", "patroni": { "version": "3.1.0", "scope": "demo", "name": "patroni1" } } **Example:** A cluster with the :ref:`pause mode ` enabled .. code-block:: bash $ curl -s http://localhost:8008/patroni | jq . { "state": "running", "postmaster_start_time": "2023-08-18 11:09:08.615242+00:00", "role": "replica", "server_version": 150004, "xlog": { "location": 67420024 }, "timeline": 1, "replication": [ { "usename": "replicator", "application_name": "patroni2", "client_addr": "10.89.0.6", "state": "streaming", "sync_state": "async", "sync_priority": 0 }, { "usename": "replicator", "application_name": "patroni3", "client_addr": "10.89.0.2", "state": "streaming", "sync_state": "async", "sync_priority": 0 } ], "pause": true, "dcs_last_seen": 1692356928, "tags": { "clonefrom": true }, "database_system_identifier": "7268616322854375442", "patroni": { "version": "3.1.0", "scope": "demo", "name": "patroni1" } } Retrieve the Patroni metrics in Prometheus format through the ``GET /metrics`` endpoint. .. code-block:: bash $ curl http://localhost:8008/metrics # HELP patroni_version Patroni semver without periods. \ # TYPE patroni_version gauge patroni_version{scope="batman",name="patroni1"} 020103 # HELP patroni_postgres_running Value is 1 if Postgres is running, 0 otherwise. 
# TYPE patroni_postgres_running gauge patroni_postgres_running{scope="batman",name="patroni1"} 1 # HELP patroni_postmaster_start_time Epoch seconds since Postgres started. # TYPE patroni_postmaster_start_time gauge patroni_postmaster_start_time{scope="batman",name="patroni1"} 1657656955.179243 # HELP patroni_master Value is 1 if this node is the leader, 0 otherwise. # TYPE patroni_master gauge patroni_master{scope="batman",name="patroni1"} 1 # HELP patroni_primary Value is 1 if this node is the leader, 0 otherwise. # TYPE patroni_primary gauge patroni_primary{scope="batman",name="patroni1"} 1 # HELP patroni_xlog_location Current location of the Postgres transaction log, 0 if this node is not the leader. # TYPE patroni_xlog_location counter patroni_xlog_location{scope="batman",name="patroni1"} 22320573386952 # HELP patroni_standby_leader Value is 1 if this node is the standby_leader, 0 otherwise. # TYPE patroni_standby_leader gauge patroni_standby_leader{scope="batman",name="patroni1"} 0 # HELP patroni_replica Value is 1 if this node is a replica, 0 otherwise. # TYPE patroni_replica gauge patroni_replica{scope="batman",name="patroni1"} 0 # HELP patroni_sync_standby Value is 1 if this node is a sync standby replica, 0 otherwise. # TYPE patroni_sync_standby gauge patroni_sync_standby{scope="batman",name="patroni1"} 0 # HELP patroni_xlog_received_location Current location of the received Postgres transaction log, 0 if this node is not a replica. # TYPE patroni_xlog_received_location counter patroni_xlog_received_location{scope="batman",name="patroni1"} 0 # HELP patroni_xlog_replayed_location Current location of the replayed Postgres transaction log, 0 if this node is not a replica. # TYPE patroni_xlog_replayed_location counter patroni_xlog_replayed_location{scope="batman",name="patroni1"} 0 # HELP patroni_xlog_replayed_timestamp Current timestamp of the replayed Postgres transaction log, 0 if null. 
# TYPE patroni_xlog_replayed_timestamp gauge patroni_xlog_replayed_timestamp{scope="batman",name="patroni1"} 0 # HELP patroni_xlog_paused Value is 1 if the Postgres xlog is paused, 0 otherwise. # TYPE patroni_xlog_paused gauge patroni_xlog_paused{scope="batman",name="patroni1"} 0 # HELP patroni_postgres_streaming Value is 1 if Postgres is streaming, 0 otherwise. # TYPE patroni_postgres_streaming gauge patroni_postgres_streaming{scope="batman",name="patroni1"} 1 # HELP patroni_postgres_in_archive_recovery Value is 1 if Postgres is replicating from archive, 0 otherwise. # TYPE patroni_postgres_in_archive_recovery gauge patroni_postgres_in_archive_recovery{scope="batman",name="patroni1"} 0 # HELP patroni_postgres_server_version Version of Postgres (if running), 0 otherwise. # TYPE patroni_postgres_server_version gauge patroni_postgres_server_version{scope="batman",name="patroni1"} 140004 # HELP patroni_cluster_unlocked Value is 1 if the cluster is unlocked, 0 if locked. # TYPE patroni_cluster_unlocked gauge patroni_cluster_unlocked{scope="batman",name="patroni1"} 0 # HELP patroni_postgres_timeline Postgres timeline of this node (if running), 0 otherwise. # TYPE patroni_postgres_timeline counter patroni_failsafe_mode_is_active{scope="batman",name="patroni1"} 0 # HELP patroni_postgres_timeline Postgres timeline of this node (if running), 0 otherwise. # TYPE patroni_postgres_timeline counter patroni_postgres_timeline{scope="batman",name="patroni1"} 24 # HELP patroni_dcs_last_seen Epoch timestamp when DCS was last contacted successfully by Patroni. # TYPE patroni_dcs_last_seen gauge patroni_dcs_last_seen{scope="batman",name="patroni1"} 1677658321 # HELP patroni_pending_restart Value is 1 if the node needs a restart, 0 otherwise. # TYPE patroni_pending_restart gauge patroni_pending_restart{scope="batman",name="patroni1"} 1 # HELP patroni_is_paused Value is 1 if auto failover is disabled, 0 otherwise. 
# TYPE patroni_is_paused gauge patroni_is_paused{scope="batman",name="patroni1"} 1 Cluster status endpoints ------------------------ - The ``GET /cluster`` endpoint generates a JSON document describing the current cluster topology and state: .. code-block:: bash $ curl -s http://localhost:8008/cluster | jq . { "members": [ { "name": "patroni1", "role": "leader", "state": "running", "api_url": "http://10.89.0.4:8008/patroni", "host": "10.89.0.4", "port": 5432, "timeline": 5, "tags": { "clonefrom": true } }, { "name": "patroni2", "role": "replica", "state": "streaming", "api_url": "http://10.89.0.6:8008/patroni", "host": "10.89.0.6", "port": 5433, "timeline": 5, "tags": { "clonefrom": true }, "lag": 0 } ], "scope": "demo", "scheduled_switchover": { "at": "2023-09-24T10:36:00+02:00", "from": "patroni1", "to": "patroni3" } } - The ``GET /history`` endpoint provides a view on the history of cluster switchovers/failovers. The format is very similar to the content of history files in the ``pg_wal`` directory. The only difference is the timestamp field showing when the new timeline was created. .. code-block:: bash $ curl -s http://localhost:8008/history | jq . [ [ 1, 25623960, "no recovery target specified", "2019-09-23T16:57:57+02:00" ], [ 2, 25624344, "no recovery target specified", "2019-09-24T09:22:33+02:00" ], [ 3, 25624752, "no recovery target specified", "2019-09-24T09:26:15+02:00" ], [ 4, 50331856, "no recovery target specified", "2019-09-24T09:35:52+02:00" ] ] .. _config_endpoint: Config endpoint --------------- ``GET /config``: Get the current version of the dynamic configuration: .. code-block:: bash $ curl -s http://localhost:8008/config | jq . 
{ "ttl": 30, "loop_wait": 10, "retry_timeout": 10, "maximum_lag_on_failover": 1048576, "postgresql": { "use_slots": true, "use_pg_rewind": true, "parameters": { "hot_standby": "on", "wal_level": "hot_standby", "max_wal_senders": 5, "max_replication_slots": 5, "max_connections": "100" } } } ``PATCH /config``: Change the existing configuration. .. code-block:: bash $ curl -s -XPATCH -d \ '{"loop_wait":5,"ttl":20,"postgresql":{"parameters":{"max_connections":"101"}}}' \ http://localhost:8008/config | jq . { "ttl": 20, "loop_wait": 5, "maximum_lag_on_failover": 1048576, "retry_timeout": 10, "postgresql": { "use_slots": true, "use_pg_rewind": true, "parameters": { "hot_standby": "on", "wal_level": "hot_standby", "max_wal_senders": 5, "max_replication_slots": 5, "max_connections": "101" } } } The above REST API call patches the existing configuration and returns the new configuration. Let's check that the node processed this configuration. First of all it should start printing log lines every 5 seconds (loop_wait=5). The change of "max_connections" requires a restart, so the "pending_restart" flag should be exposed: .. code-block:: bash $ curl -s http://localhost:8008/patroni | jq . { "pending_restart": true, "database_system_identifier": "6287881213849985952", "postmaster_start_time": "2016-06-13 13:13:05.211 CEST", "xlog": { "location": 2197818976 }, "patroni": { "version": "1.0", "scope": "batman", "name": "patroni1" }, "state": "running", "role": "master", "server_version": 90503 } Removing parameters: If you want to remove (reset) some setting just patch it with ``null``: .. code-block:: bash $ curl -s -XPATCH -d \ '{"postgresql":{"parameters":{"max_connections":null}}}' \ http://localhost:8008/config | jq . 
{ "ttl": 20, "loop_wait": 5, "retry_timeout": 10, "maximum_lag_on_failover": 1048576, "postgresql": { "use_slots": true, "use_pg_rewind": true, "parameters": { "hot_standby": "on", "unix_socket_directories": ".", "wal_level": "hot_standby", "max_wal_senders": 5, "max_replication_slots": 5 } } } The above call removes ``postgresql.parameters.max_connections`` from the dynamic configuration. ``PUT /config``: It's also possible to perform the full rewrite of an existing dynamic configuration unconditionally: .. code-block:: bash $ curl -s -XPUT -d \ '{"maximum_lag_on_failover":1048576,"retry_timeout":10,"postgresql":{"use_slots":true,"use_pg_rewind":true,"parameters":{"hot_standby":"on","wal_level":"hot_standby","unix_socket_directories":".","max_wal_senders":5}},"loop_wait":3,"ttl":20}' \ http://localhost:8008/config | jq . { "ttl": 20, "maximum_lag_on_failover": 1048576, "retry_timeout": 10, "postgresql": { "use_slots": true, "parameters": { "hot_standby": "on", "unix_socket_directories": ".", "wal_level": "hot_standby", "max_wal_senders": 5 }, "use_pg_rewind": true }, "loop_wait": 3 } Switchover and failover endpoints --------------------------------- .. _switchover_api: Switchover ^^^^^^^^^^ ``/switchover`` endpoint only works when the cluster is healthy (there is a leader). It also allows to schedule a switchover at a given time. When calling ``/switchover`` endpoint a candidate can be specified but is not required, in contrast to ``/failover`` endpoint. If a candidate is not provided, all the eligible nodes of the cluster will participate in the leader race after the leader stepped down. In the JSON body of the ``POST`` request you must specify the ``leader`` field. The ``candidate`` and the ``scheduled_at`` fields are optional and can be used to schedule a switchover at a specific time. Depending on the situation, requests might return different HTTP status codes and bodies. Status code **200** is returned when the switchover or failover successfully completed. 
If the switchover was successfully scheduled, Patroni will return HTTP status code **202**. In case something went wrong, the error status code (one of **400**, **412**, or **503**) will be returned with some details in the response body. ``DELETE /switchover`` can be used to delete the currently scheduled switchover. **Example:** perform a switchover to any healthy standby .. code-block:: bash $ curl -s http://localhost:8008/switchover -XPOST -d '{"leader":"postgresql1"}' Successfully switched over to "postgresql2" **Example:** perform a switchover to a specific node .. code-block:: bash $ curl -s http://localhost:8008/switchover -XPOST -d \ '{"leader":"postgresql1","candidate":"postgresql2"}' Successfully switched over to "postgresql2" **Example:** schedule a switchover from the leader to any other healthy standby in the cluster at a specific time. .. code-block:: bash $ curl -s http://localhost:8008/switchover -XPOST -d \ '{"leader":"postgresql0","scheduled_at":"2019-09-24T12:00+00"}' Switchover scheduled Failover ^^^^^^^^ ``/failover`` endpoint can be used to perform a manual failover when there are no healthy nodes (e.g. to an asynchronous standby if all synchronous standbys are not healthy enough to promote). However there is no requirement for a cluster not to have leader - failover can also be run on a healthy cluster. In the JSON body of the ``POST`` request you must specify the ``candidate`` field. If the ``leader`` field is specified, a switchover is triggered instead. **Example:** .. code-block:: bash $ curl -s http://localhost:8008/failover -XPOST -d '{"candidate":"postgresql1"}' Successfully failed over to "postgresql1" .. warning:: :ref:`Be very careful ` when using this endpoint, as this can cause data loss in certain situations. In most cases, :ref:`the switchover endpoint ` satisfies the administrator's needs. 
``POST /switchover`` and ``POST /failover`` endpoints are used by :ref:`patronictl_switchover` and :ref:`patronictl_failover`, respectively. ``DELETE /switchover`` is used by :ref:`patronictl flush cluster-name switchover `. .. list-table:: Failover/Switchover comparison :widths: 25 25 25 :header-rows: 1 * - - Failover - Switchover * - Requires leader specified - no - yes * - Requires candidate specified - yes - no * - Can be run in pause - yes - yes (only to a specific candidate) * - Can be scheduled - no - yes (if not in pause) .. _failover_healthcheck: Healthy standby ^^^^^^^^^^^^^^^ There are a couple of checks that a member of a cluster should pass to be able to participate in the leader race during a switchover or to become a leader as a failover/switchover candidate: - be reachable via Patroni API; - not have ``nofailover`` tag set to ``true``; - have watchdog fully functional (if required by the configuration); - in case of a switchover in a healthy cluster or an automatic failover, not exceed maximum replication lag (``maximum_lag_on_failover`` :ref:`configuration parameter `); - in case of a switchover in a healthy cluster or an automatic failover, not have a timeline number smaller than the cluster timeline if ``check_timeline`` :ref:`configuration parameter ` is set to ``true``; - in :ref:`synchronous mode `: - In case of a switchover (both with and without a candidate): be listed in the ``/sync`` key members; - For a failover in both healthy and unhealthy clusters, this check is omitted. .. warning:: In case of a manual failover in a cluster without a leader, a candidate will be allowed to promote even if: - it is not in the ``/sync`` key members when synchronous mode is enabled; - its lag exceeds the maximum replication lag allowed; - it has the timeline number smaller than the last known cluster timeline. .. 
_restart_endpoint: Restart endpoint ---------------- - ``POST /restart``: You can restart Postgres on the specific node by performing the ``POST /restart`` call. In the JSON body of ``POST`` request it is possible to optionally specify some restart conditions: - **restart_pending**: boolean, if set to ``true`` Patroni will restart PostgreSQL only when restart is pending in order to apply some changes in the PostgreSQL config. - **role**: perform restart only if the current role of the node matches with the role from the POST request. - **postgres_version**: perform restart only if the current version of postgres is smaller than specified in the POST request. - **timeout**: how long we should wait before PostgreSQL starts accepting connections. Overrides ``primary_start_timeout``. - **schedule**: timestamp with time zone, schedule the restart somewhere in the future. - ``DELETE /restart``: delete the scheduled restart ``POST /restart`` and ``DELETE /restart`` endpoints are used by :ref:`patronictl_restart` and :ref:`patronictl flush cluster-name restart ` respectively. .. _reload_endpoint: Reload endpoint --------------- The ``POST /reload`` call will order Patroni to re-read and apply the configuration file. This is the equivalent of sending the ``SIGHUP`` signal to the Patroni process. In case you changed some of the Postgres parameters which require a restart (like **shared_buffers**), you still have to explicitly do the restart of Postgres by either calling the ``POST /restart`` endpoint or with the help of :ref:`patronictl_restart`. The reload endpoint is used by :ref:`patronictl_reload`. Reinitialize endpoint --------------------- ``POST /reinitialize``: reinitialize the PostgreSQL data directory on the specified node. It is allowed to be executed only on replicas. Once called, it will remove the data directory and start ``pg_basebackup`` or some alternative :ref:`replica creation method `. 
The call might fail if Patroni is in a loop trying to recover (restart) a failed Postgres. In order to overcome this problem one can specify ``{"force":true}`` in the request body. The reinitialize endpoint is used by :ref:`patronictl_reinit`. patroni-3.2.2/docs/security.rst000066400000000000000000000052531455170150700165350ustar00rootroot00000000000000.. _security: ======================= Security Considerations ======================= A Patroni cluster has two interfaces to be protected from unauthorized access: the distributed configuration storage (DCS) and the Patroni REST API. Protecting DCS ============== Patroni and :ref:`patronictl` both store and retrieve data to/from the DCS. Despite DCS doesn't contain any sensitive information, it allows changing some of Patroni/Postgres configuration. Therefore the very first thing that should be protected is DCS itself. The details of protection depend on the type of DCS used. The authentication and encryption parameters (tokens/basic-auth/client certificates) for the supported types of DCS are covered in :ref:`settings `. The general recommendation is to enable TLS for all DCS communication. Protecting the REST API ======================= Protecting the REST API is a more complicated task. The Patroni REST API is used by Patroni itself during the leader race, by the :ref:`patronictl` tool in order to perform failovers/switchovers/reinitialize/restarts/reloads, by HAProxy or any other kind of load balancer to perform HTTP health checks, and of course could also be used for monitoring. From the point of view of security, REST API contains safe (``GET`` requests, only retrieve information) and unsafe (``PUT``, ``POST``, ``PATCH`` and ``DELETE`` requests, change the state of nodes) endpoints. The unsafe endpoints can be protected with HTTP basic-auth by setting the ``restapi.authentication.username`` and ``restapi.authentication.password`` parameters. There is no way to protect the safe endpoints without enabling TLS. 
When TLS for the REST API is enabled and a PKI is established, mutual authentication of the API server and API client is possible for all endpoints. The ``restapi`` section parameters enable TLS client authentication to the server. Depending on the value of the ``verify_client`` parameter, the API server requires a successful client certificate verification for both safe and unsafe API calls (``verify_client: required``), or only for unsafe API calls (``verify_client: optional``), or for no API calls (``verify_client: none``). The ``ctl`` section parameters enable TLS server authentication to the client (the :ref:`patronictl` tool which uses the same config as patroni). Set ``insecure: true`` to disable the server certificate verification by the client. See :ref:`settings ` for a detailed description of the TLS client parameters. Protecting the PostgreSQL database proper from unauthorized access is beyond the scope of this document and is covered in https://www.postgresql.org/docs/current/client-authentication.html patroni-3.2.2/docs/watchdog.rst000066400000000000000000000076261455170150700164740ustar00rootroot00000000000000.. _watchdog: Watchdog support ================ Having multiple PostgreSQL servers running as primary can result in transactions lost due to diverging timelines. This situation is also called a split-brain problem. To avoid split-brain Patroni needs to ensure PostgreSQL will not accept any transaction commits after leader key expires in the DCS. Under normal circumstances Patroni will try to achieve this by stopping PostgreSQL when leader lock update fails for any reason. However, this may fail to happen due to various reasons: - Patroni has crashed due to a bug, out-of-memory condition or by being accidentally killed by a system administrator. - Shutting down PostgreSQL is too slow. - Patroni does not get to run due to high load on the system, the VM being paused by the hypervisor, or other infrastructure issues. 
To guarantee correct behavior under these conditions Patroni supports watchdog devices. Watchdog devices are software or hardware mechanisms that will reset the whole system when they do not get a keepalive heartbeat within a specified timeframe. This adds an additional layer of fail safe in case usual Patroni split-brain protection mechanisms fail. Patroni will try to activate the watchdog before promoting PostgreSQL to primary. If watchdog activation fails and watchdog mode is ``required`` then the node will refuse to become leader. When deciding to participate in leader election Patroni will also check that watchdog configuration will allow it to become leader at all. After demoting PostgreSQL (for example due to a manual failover) Patroni will disable the watchdog again. Watchdog will also be disabled while Patroni is in paused state. By default Patroni will set up the watchdog to expire 5 seconds before TTL expires. With the default setup of ``loop_wait=10`` and ``ttl=30`` this gives HA loop at least 15 seconds (``ttl`` - ``safety_margin`` - ``loop_wait``) to complete before the system gets forcefully reset. By default accessing DCS is configured to time out after 10 seconds. This means that when DCS is unavailable, for example due to network issues, Patroni and PostgreSQL will have at least 5 seconds (``ttl`` - ``safety_margin`` - ``loop_wait`` - ``retry_timeout``) to come to a state where all client connections are terminated. Safety margin is the amount of time that Patroni reserves for time between leader key update and watchdog keepalive. Patroni will try to send a keepalive immediately after confirmation of leader key update. If Patroni process is suspended for extended amount of time at exactly the right moment the keepalive may be delayed for more than the safety margin without triggering the watchdog. This results in a window of time where watchdog will not trigger before leader key expiration, invalidating the guarantee. 
To be absolutely sure that watchdog will trigger under all circumstances set up the watchdog to expire after half of TTL by setting ``safety_margin`` to -1 to set watchdog timeout to ``ttl // 2``. If you need this guarantee you probably should increase ``ttl`` and/or reduce ``loop_wait`` and ``retry_timeout``. Currently watchdogs are only supported using Linux watchdog device interface. Setting up software watchdog on Linux ------------------------------------- Default Patroni configuration will try to use ``/dev/watchdog`` on Linux if it is accessible to Patroni. For most use cases using software watchdog built into the Linux kernel is secure enough. To enable software watchdog issue the following commands as root before starting Patroni: .. code-block:: bash modprobe softdog # Replace postgres with the user you will be running patroni under chown postgres /dev/watchdog For testing it may be helpful to disable rebooting by adding ``soft_noboot=1`` to the modprobe command line. In this case the watchdog will just log a line in kernel ring buffer, visible via `dmesg`. Patroni will log information about the watchdog when it is successfully enabled. patroni-3.2.2/docs/yaml_configuration.rst000066400000000000000000001201511455170150700205520ustar00rootroot00000000000000.. _yaml_configuration: ============================ YAML Configuration Settings ============================ Global/Universal ---------------- - **name**: the name of the host. Must be unique for the cluster. - **namespace**: path within the configuration store where Patroni will keep information about the cluster. Default value: "/service" - **scope**: cluster name Log --- - **level**: sets the general logging level. Default value is **INFO** (see `the docs for Python logging `_) - **traceback\_level**: sets the level where tracebacks will be visible. Default value is **ERROR**. Set it to **DEBUG** if you want to see tracebacks only if you enable **log.level=DEBUG**. 
- **format**: sets the log formatting string. Default value is **%(asctime)s %(levelname)s: %(message)s** (see `the LogRecord attributes `_) - **dateformat**: sets the datetime formatting string. (see the `formatTime() documentation `_) - **max\_queue\_size**: Patroni is using two-step logging. Log records are written into the in-memory queue and there is a separate thread which pulls them from the queue and writes to stderr or file. The maximum size of the internal queue is limited by default by **1000** records, which is enough to keep logs for the past 1h20m. - **dir**: Directory to write application logs to. The directory must exist and be writable by the user executing Patroni. If you set this value, the application will retain 4 25MB logs by default. You can tune those retention values with `file_num` and `file_size` (see below). - **file\_num**: The number of application logs to retain. - **file\_size**: Size of patroni.log file (in bytes) that triggers a log rolling. - **loggers**: This section allows redefining logging level per python module - **patroni.postmaster: WARNING** - **urllib3: DEBUG** .. _bootstrap_settings: Bootstrap configuration ----------------------- .. note:: Once Patroni has initialized the cluster for the first time and settings have been stored in the DCS, all future changes to the ``bootstrap.dcs`` section of the YAML configuration will not take any effect! If you want to change them please use either :ref:`patronictl_edit_config` or the Patroni :ref:`REST API `. - **bootstrap**: - **dcs**: This section will be written into `///config` of the given configuration store after initializing the new cluster. The global dynamic configuration for the cluster. You can put any of the parameters described in the :ref:`Dynamic Configuration settings ` under ``bootstrap.dcs`` and after Patroni has initialized (bootstrapped) the new cluster, it will write this section into `///config` of the configuration store. 
- **method**: custom script to use for bootstrapping this cluster. See :ref:`custom bootstrap methods documentation ` for details. When ``initdb`` is specified revert to the default ``initdb`` command. ``initdb`` is also triggered when no ``method`` parameter is present in the configuration file. - **initdb**: (optional) list options to be passed on to initdb. - **- data-checksums**: Must be enabled when pg_rewind is needed on 9.3. - **- encoding: UTF8**: default encoding for new databases. - **- locale: UTF8**: default locale for new databases. - **post\_bootstrap** or **post\_init**: An additional script that will be executed after initializing the cluster. The script receives a connection string URL (with the cluster superuser as a user name). The PGPASSFILE variable is set to the location of pgpass file. .. _citus_settings: Citus ----- Enables integration Patroni with `Citus `__. If configured, Patroni will take care of registering Citus worker nodes on the coordinator. You can find more information about Citus support :ref:`here `. - **group**: the Citus group id, integer. Use ``0`` for coordinator and ``1``, ``2``, etc... for workers - **database**: the database where ``citus`` extension should be created. Must be the same on the coordinator and all workers. Currently only one database is supported. .. _consul_settings: Consul ------ Most of the parameters are optional, but you have to specify one of the **host** or **url** - **host**: the host:port for the Consul local agent. - **url**: url for the Consul local agent, in format: http(s)://host:port. - **port**: (optional) Consul port. - **scheme**: (optional) **http** or **https**, defaults to **http**. - **token**: (optional) ACL token. - **verify**: (optional) whether to verify the SSL certificate for HTTPS requests. - **cacert**: (optional) The ca certificate. If present it will enable validation. - **cert**: (optional) file with the client certificate. - **key**: (optional) file with the client key. 
Can be empty if the key is part of **cert**. - **dc**: (optional) Datacenter to communicate with. By default the datacenter of the host is used. - **consistency**: (optional) Select consul consistency mode. Possible values are ``default``, ``consistent``, or ``stale`` (more details in `consul API reference `__) - **checks**: (optional) list of Consul health checks used for the session. By default an empty list is used. - **register\_service**: (optional) whether or not to register a service with the name defined by the scope parameter and the tag master, primary, replica, or standby-leader depending on the node's role. Defaults to **false**. - **service\_tags**: (optional) additional static tags to add to the Consul service apart from the role (``master``/``primary``/``replica``/``standby-leader``). By default an empty list is used. - **service\_check\_interval**: (optional) how often to perform health check against registered url. Defaults to '5s'. - **service\_check\_tls\_server\_name**: (optional) override SNI host when connecting via TLS, see also `consul agent check API reference `__. The ``token`` needs to have the following ACL permissions: :: service_prefix "${scope}" { policy = "write" } key_prefix "${namespace}/${scope}" { policy = "write" } session_prefix "" { policy = "write" } Etcd ---- Most of the parameters are optional, but you have to specify one of the **host**, **hosts**, **url**, **proxy** or **srv** - **host**: the host:port for the etcd endpoint. - **hosts**: list of etcd endpoint in format host1:port1,host2:port2,etc... Could be a comma separated string or an actual yaml list. - **use\_proxies**: If this parameter is set to true, Patroni will consider **hosts** as a list of proxies and will not perform a topology discovery of etcd cluster. - **url**: url for the etcd. - **proxy**: proxy url for the etcd. If you are connecting to the etcd using proxy, use this parameter instead of **url**. 
- **srv**: Domain to search the SRV record(s) for cluster autodiscovery. Patroni will try to query these SRV service names for specified domain (in that order until first success): ``_etcd-client-ssl``, ``_etcd-client``, ``_etcd-ssl``, ``_etcd``, ``_etcd-server-ssl``, ``_etcd-server``. If SRV records for ``_etcd-server-ssl`` or ``_etcd-server`` are retrieved then ETCD peer protocol is used to query ETCD for available members. Otherwise hosts from SRV records will be used. - **srv\_suffix**: Configures a suffix to the SRV name that is queried during discovery. Use this flag to differentiate between multiple etcd clusters under the same domain. Works only in conjunction with **srv**. For example, if ``srv_suffix: foo`` and ``srv: example.org`` are set, the following DNS SRV query is made:``_etcd-client-ssl-foo._tcp.example.com`` (and so on for every possible ETCD SRV service name). - **protocol**: (optional) http or https, if not specified http is used. If the **url** or **proxy** is specified - will take protocol from them. - **username**: (optional) username for etcd authentication. - **password**: (optional) password for etcd authentication. - **cacert**: (optional) The ca certificate. If present it will enable validation. - **cert**: (optional) file with the client certificate. - **key**: (optional) file with the client key. Can be empty if the key is part of **cert**. Etcdv3 ------ If you want Patroni to work with an Etcd cluster via protocol version 3, you need to use the ``etcd3`` section in the Patroni configuration file. All configuration parameters are the same as for ``etcd``. .. warning:: Keys created with protocol version 2 are not visible with protocol version 3 and the other way around, therefore it is not possible to switch from ``etcd`` to ``etcd3`` just by updating Patroni config file. ZooKeeper ---------- - **hosts**: List of ZooKeeper cluster members in format: ['host1:port1', 'host2:port2', 'etc...']. 
- **use_ssl**: (optional) Whether SSL is used or not. Defaults to ``false``. If set to ``false``, all SSL specific parameters are ignored. - **cacert**: (optional) The CA certificate. If present it will enable validation. - **cert**: (optional) File with the client certificate. - **key**: (optional) File with the client key. - **key_password**: (optional) The client key password. - **verify**: (optional) Whether to verify certificate or not. Defaults to ``true``. - **set_acls**: (optional) If set, configure Kazoo to apply a default ACL to each ZNode that it creates. ACLs will assume 'x509' schema and should be specified as a dictionary with the principal as the key and one or more permissions as a list in the value. Permissions may be one of ``CREATE``, ``READ``, ``WRITE``, ``DELETE`` or ``ADMIN``. For example, ``set_acls: {CN=principal1: [CREATE, READ], CN=principal2: [ALL]}``. .. note:: It is required to install ``kazoo>=2.6.0`` to support SSL. Exhibitor --------- - **hosts**: initial list of Exhibitor (ZooKeeper) nodes in format: 'host1,host2,etc...'. This list updates automatically whenever the Exhibitor (ZooKeeper) cluster topology changes. - **poll\_interval**: how often the list of ZooKeeper and Exhibitor nodes should be updated from Exhibitor. - **port**: Exhibitor port. .. _kubernetes_settings: Kubernetes ---------- - **bypass\_api\_service**: (optional) When communicating with the Kubernetes API, Patroni is usually relying on the `kubernetes` service, the address of which is exposed in the pods via the `KUBERNETES_SERVICE_HOST` environment variable. If `bypass_api_service` is set to ``true``, Patroni will resolve the list of API nodes behind the service and connect directly to them. - **namespace**: (optional) Kubernetes namespace where Patroni pod is running. Default value is `default`. - **labels**: Labels in format ``{label1: value1, label2: value2}``. 
These labels will be used to find existing objects (Pods and either Endpoints or ConfigMaps) associated with the current cluster. Also Patroni will set them on every object (Endpoint or ConfigMap) it creates. - **scope\_label**: (optional) name of the label containing cluster name. Default value is `cluster-name`. - **role\_label**: (optional) name of the label containing role (master or replica or other custom value). Patroni will set this label on the pod it runs in. Default value is ``role``. - **leader\_label\_value**: (optional) value of the pod label when Postgres role is ``master``. Default value is ``master``. - **follower\_label\_value**: (optional) value of the pod label when Postgres role is ``replica``. Default value is ``replica``. - **standby\_leader\_label\_value**: (optional) value of the pod label when Postgres role is ``standby_leader``. Default value is ``master``. - **tmp\_role\_label**: (optional) name of the temporary label containing role (master or replica). Value of this label will always use the default of corresponding role. Set only when necessary. - **use\_endpoints**: (optional) if set to true, Patroni will use Endpoints instead of ConfigMaps to run leader elections and keep cluster state. - **pod\_ip**: (optional) IP address of the pod Patroni is running in. This value is required when `use_endpoints` is enabled and is used to populate the leader endpoint subsets when the pod's PostgreSQL is promoted. - **ports**: (optional) if the Service object has the name for the port, the same name must appear in the Endpoint object, otherwise service won't work. For example, if your service is defined as ``{Kind: Service, spec: {ports: [{name: postgresql, port: 5432, targetPort: 5432}]}}``, then you have to set ``kubernetes.ports: [{"name": "postgresql", "port": 5432}]`` and Patroni will use it for updating subsets of the leader Endpoint. This parameter is used only if `kubernetes.use_endpoints` is set. 
- **cacert**: (optional) Specifies the file with the CA_BUNDLE file with certificates of trusted CAs to use while verifying Kubernetes API SSL certs. If not provided, patroni will use the value provided by the ServiceAccount secret. - **retriable\_http\_codes**: (optional) list of HTTP status codes from K8s API to retry on. By default Patroni is retrying on ``500``, ``503``, and ``504``, or if K8s API response has ``retry-after`` HTTP header. .. _raft_settings: Raft (deprecated) ----------------- - **self\_addr**: ``ip:port`` to listen on for Raft connections. The ``self_addr`` must be accessible from other nodes of the cluster. If not set, the node will not participate in consensus. - **bind\_addr**: (optional) ``ip:port`` to listen on for Raft connections. If not specified the ``self_addr`` will be used. - **partner\_addrs**: list of other Patroni nodes in the cluster in format: ['ip1:port', 'ip2:port', 'etc...'] - **data\_dir**: directory where to store Raft log and snapshot. If not specified the current working directory is used. - **password**: (optional) Encrypt Raft traffic with a specified password, requires ``cryptography`` python module. Short FAQ about Raft implementation - Q: How to list all the nodes providing consensus? A: ``syncobj_admin -conn host:port -status`` where the host:port is the address of one of the cluster nodes - Q: Node that was a part of consensus and has gone and I can't reuse the same IP for other node. How to remove this node from the consensus? A: ``syncobj_admin -conn host:port -remove host2:port2`` where the ``host2:port2`` is the address of the node you want to remove from consensus. - Q: Where to get the ``syncobj_admin`` utility? A: It is installed together with ``pysyncobj`` module (python RAFT implementation), which is Patroni dependency. - Q: it is possible to run Patroni node without adding in to the consensus? A: Yes, just comment out or remove ``raft.self_addr`` from Patroni configuration. 
- Q: It is possible to run Patroni and PostgreSQL only on two nodes? A: Yes, on the third node you can run ``patroni_raft_controller`` (without Patroni and PostgreSQL). In such a setup, one can temporarily lose one node without affecting the primary. .. _postgresql_settings: PostgreSQL ---------- - **postgresql**: - **authentication**: - **superuser**: - **username**: name for the superuser, set during initialization (initdb) and later used by Patroni to connect to the postgres. - **password**: password for the superuser, set during initialization (initdb). - **sslmode**: (optional) maps to the `sslmode `__ connection parameter, which allows a client to specify the type of TLS negotiation mode with the server. For more information on how each mode works, please visit the `PostgreSQL documentation `__. The default mode is ``prefer``. - **sslkey**: (optional) maps to the `sslkey `__ connection parameter, which specifies the location of the secret key used with the client's certificate. - **sslpassword**: (optional) maps to the `sslpassword `__ connection parameter, which specifies the password for the secret key specified in ``sslkey``. - **sslcert**: (optional) maps to the `sslcert `__ connection parameter, which specifies the location of the client certificate. - **sslrootcert**: (optional) maps to the `sslrootcert `__ connection parameter, which specifies the location of a file containing one ore more certificate authorities (CA) certificates that the client will use to verify a server's certificate. - **sslcrl**: (optional) maps to the `sslcrl `__ connection parameter, which specifies the location of a file containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list. - **sslcrldir**: (optional) maps to the `sslcrldir `__ connection parameter, which specifies the location of a directory with files containing a certificate revocation list. 
A client will reject connecting to any server that has a certificate present in this list. - **gssencmode**: (optional) maps to the `gssencmode `__ connection parameter, which determines whether or with what priority a secure GSS TCP/IP connection will be negotiated with the server - **channel_binding**: (optional) maps to the `channel_binding `__ connection parameter, which controls the client's use of channel binding. - **replication**: - **username**: replication username; the user will be created during initialization. Replicas will use this user to access the replication source via streaming replication - **password**: replication password; the user will be created during initialization. - **sslmode**: (optional) maps to the `sslmode `__ connection parameter, which allows a client to specify the type of TLS negotiation mode with the server. For more information on how each mode works, please visit the `PostgreSQL documentation `__. The default mode is ``prefer``. - **sslkey**: (optional) maps to the `sslkey `__ connection parameter, which specifies the location of the secret key used with the client's certificate. - **sslpassword**: (optional) maps to the `sslpassword `__ connection parameter, which specifies the password for the secret key specified in ``sslkey``. - **sslcert**: (optional) maps to the `sslcert `__ connection parameter, which specifies the location of the client certificate. - **sslrootcert**: (optional) maps to the `sslrootcert `__ connection parameter, which specifies the location of a file containing one ore more certificate authorities (CA) certificates that the client will use to verify a server's certificate. - **sslcrl**: (optional) maps to the `sslcrl `__ connection parameter, which specifies the location of a file containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list. 
- **sslcrldir**: (optional) maps to the `sslcrldir `__ connection parameter, which specifies the location of a directory with files containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list. - **gssencmode**: (optional) maps to the `gssencmode `__ connection parameter, which determines whether or with what priority a secure GSS TCP/IP connection will be negotiated with the server - **channel_binding**: (optional) maps to the `channel_binding `__ connection parameter, which controls the client's use of channel binding. - **rewind**: - **username**: (optional) name for the user for ``pg_rewind``; the user will be created during initialization of postgres 11+ and all necessary `permissions `__ will be granted. - **password**: (optional) password for the user for ``pg_rewind``; the user will be created during initialization. - **sslmode**: (optional) maps to the `sslmode `__ connection parameter, which allows a client to specify the type of TLS negotiation mode with the server. For more information on how each mode works, please visit the `PostgreSQL documentation `__. The default mode is ``prefer``. - **sslkey**: (optional) maps to the `sslkey `__ connection parameter, which specifies the location of the secret key used with the client's certificate. - **sslpassword**: (optional) maps to the `sslpassword `__ connection parameter, which specifies the password for the secret key specified in ``sslkey``. - **sslcert**: (optional) maps to the `sslcert `__ connection parameter, which specifies the location of the client certificate. - **sslrootcert**: (optional) maps to the `sslrootcert `__ connection parameter, which specifies the location of a file containing one ore more certificate authorities (CA) certificates that the client will use to verify a server's certificate. 
- **sslcrl**: (optional) maps to the `sslcrl `__ connection parameter, which specifies the location of a file containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list. - **sslcrldir**: (optional) maps to the `sslcrldir `__ connection parameter, which specifies the location of a directory with files containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list. - **gssencmode**: (optional) maps to the `gssencmode `__ connection parameter, which determines whether or with what priority a secure GSS TCP/IP connection will be negotiated with the server - **channel_binding**: (optional) maps to the `channel_binding `__ connection parameter, which controls the client's use of channel binding. - **callbacks**: callback scripts to run on certain actions. Patroni will pass the action, role and cluster name. (See scripts/aws.py as an example of how to write them.) - **on\_reload**: run this script when configuration reload is triggered. - **on\_restart**: run this script when the postgres restarts (without changing role). - **on\_role\_change**: run this script when the postgres is being promoted or demoted. - **on\_start**: run this script when the postgres starts. - **on\_stop**: run this script when the postgres stops. - **connect\_address**: IP address + port through which Postgres is accessible from other nodes and applications. - **proxy\_address**: IP address + port through which a connection pool (e.g. pgbouncer) running next to Postgres is accessible. The value is written to the member key in DCS as ``proxy_url`` and could be used/useful for service discovery. - **create\_replica\_methods**: an ordered list of the create methods for turning a Patroni node into a new replica. "basebackup" is the default method; other methods are assumed to refer to scripts, each of which is configured as its own config item. 
See :ref:`custom replica creation methods documentation ` for further explanation. - **data\_dir**: The location of the Postgres data directory, either :ref:`existing ` or to be initialized by Patroni. - **config\_dir**: The location of the Postgres configuration directory, defaults to the data directory. Must be writable by Patroni. - **bin\_dir**: (optional) Path to PostgreSQL binaries (pg_ctl, initdb, pg_controldata, pg_basebackup, postgres, pg_isready, pg_rewind). If not provided or is an empty string, PATH environment variable will be used to find the executables. - **bin\_name**: (optional) Make it possible to override Postgres binary names, if you are using a custom Postgres distribution: - **pg\_ctl**: (optional) Custom name for ``pg_ctl`` binary. - **initdb**: (optional) Custom name for ``initdb`` binary. - **pg\controldata**: (optional) Custom name for ``pg_controldata`` binary. - **pg\_basebackup**: (optional) Custom name for ``pg_basebackup`` binary. - **postgres**: (optional) Custom name for ``postgres`` binary. - **pg\_isready**: (optional) Custom name for ``pg_isready`` binary. - **pg\_rewind**: (optional) Custom name for ``pg_rewind`` binary. - **listen**: IP address + port that Postgres listens to; must be accessible from other nodes in the cluster, if you're using streaming replication. Multiple comma-separated addresses are permitted, as long as the port component is appended after to the last one with a colon, i.e. ``listen: 127.0.0.1,127.0.0.2:5432``. Patroni will use the first address from this list to establish local connections to the PostgreSQL node. - **use\_unix\_socket**: specifies that Patroni should prefer to use unix sockets to connect to the cluster. Default value is ``false``. If ``unix_socket_directories`` is defined, Patroni will use the first suitable value from it to connect to the cluster and fallback to tcp if nothing is suitable. 
If ``unix_socket_directories`` is not specified in ``postgresql.parameters``, Patroni will assume that the default value should be used and omit ``host`` from the connection parameters. - **use\_unix\_socket\_repl**: specifies that Patroni should prefer to use unix sockets for replication user cluster connection. Default value is ``false``. If ``unix_socket_directories`` is defined, Patroni will use the first suitable value from it to connect to the cluster and fallback to tcp if nothing is suitable. If ``unix_socket_directories`` is not specified in ``postgresql.parameters``, Patroni will assume that the default value should be used and omit ``host`` from the connection parameters. - **pgpass**: path to the `.pgpass `__ password file. Patroni creates this file before executing pg\_basebackup, the post_init script and under some other circumstances. The location must be writable by Patroni. - **recovery\_conf**: additional configuration settings written to recovery.conf when configuring follower. - **custom\_conf** : path to an optional custom ``postgresql.conf`` file, that will be used in place of ``postgresql.base.conf``. The file must exist on all cluster nodes, be readable by PostgreSQL and will be included from its location on the real ``postgresql.conf``. Note that Patroni will not monitor this file for changes, nor backup it. However, its settings can still be overridden by Patroni's own configuration facilities - see :ref:`dynamic configuration ` for details. - **parameters**: list of configuration settings for Postgres. Many of these are required for replication to work. - **pg\_hba**: list of lines that Patroni will use to generate ``pg_hba.conf``. Patroni ignores this parameter if ``hba_file`` PostgreSQL parameter is set to a non-default value. Together with :ref:`dynamic configuration ` this parameter simplifies management of ``pg_hba.conf``. 
- **- host all all 0.0.0.0/0 md5** - **- host replication replicator 127.0.0.1/32 md5**: A line like this is required for replication. - **pg\_ident**: list of lines that Patroni will use to generate ``pg_ident.conf``. Patroni ignores this parameter if ``ident_file`` PostgreSQL parameter is set to a non-default value. Together with :ref:`dynamic configuration ` this parameter simplifies management of ``pg_ident.conf``. - **- mapname1 systemname1 pguser1** - **- mapname1 systemname2 pguser2** - **pg\_ctl\_timeout**: How long should pg_ctl wait when doing ``start``, ``stop`` or ``restart``. Default value is 60 seconds. - **use\_pg\_rewind**: try to use pg\_rewind on the former leader when it joins cluster as a replica. - **remove\_data\_directory\_on\_rewind\_failure**: If this option is enabled, Patroni will remove the PostgreSQL data directory and recreate the replica. Otherwise it will try to follow the new leader. Default value is **false**. - **remove\_data\_directory\_on\_diverged\_timelines**: Patroni will remove the PostgreSQL data directory and recreate the replica if it notices that timelines are diverging and the former primary can not start streaming from the new primary. This option is useful when ``pg_rewind`` can not be used. While performing timelines divergence check on PostgreSQL v10 and older Patroni will try to connect with replication credential to the "postgres" database. Hence, such access should be allowed in the pg_hba.conf. Default value is **false**. - **replica\_method**: for each create_replica_methods other than basebackup, you would add a configuration section of the same name. At a minimum, this should include "command" with a full path to the actual script to be executed. Other configuration parameters will be passed along to the script in the form "parameter=value". - **pre\_promote**: a fencing script that executes during a failover after acquiring the leader lock but before promoting the replica. 
If the script exits with a non-zero code, Patroni does not promote the replica and removes the leader key from DCS. - **before\_stop**: a script that executes immediately prior to stopping postgres. As opposed to a callback, this script runs synchronously, blocking shutdown until it has completed. The return code of this script does not impact whether shutdown proceeds afterwards. .. _restapi_settings: REST API -------- - **restapi**: - **connect\_address**: IP address (or hostname) and port, to access the Patroni's :ref:`REST API `. All the members of the cluster must be able to connect to this address, so unless the Patroni setup is intended for a demo inside the localhost, this address must be a non "localhost" or loopback address (ie: "localhost" or "127.0.0.1"). It can serve as an endpoint for HTTP health checks (read below about the "listen" REST API parameter), and also for user queries (either directly or via the REST API), as well as for the health checks done by the cluster members during leader elections (for example, to determine whether the leader is still running, or if there is a node which has a WAL position that is ahead of the one doing the query; etc.) The connect_address is put in the member key in DCS, making it possible to translate the member name into the address to connect to its REST API. - **listen**: IP address (or hostname) and port that Patroni will listen to for the REST API - to provide also the same health checks and cluster messaging between the participating nodes, as described above. to provide health-check information for HAProxy (or any other load balancer capable of doing a HTTP "OPTION" or "GET" checks). - **authentication**: (optional) - **username**: Basic-auth username to protect unsafe REST API endpoints. - **password**: Basic-auth password to protect unsafe REST API endpoints. - **certfile**: (optional): Specifies the file with the certificate in the PEM format. 
If the certfile is not specified or is left empty, the API server will work without SSL. - **keyfile**: (optional): Specifies the file with the secret key in the PEM format. - **keyfile\_password**: (optional): Specifies a password for decrypting the keyfile. - **cafile**: (optional): Specifies the file with the CA_BUNDLE with certificates of trusted CAs to use while verifying client certs. - **ciphers**: (optional): Specifies the permitted cipher suites (e.g. "ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES128-GCM-SHA256:!SSLv1:!SSLv2:!SSLv3:!TLSv1:!TLSv1.1") - **verify\_client**: (optional): ``none`` (default), ``optional`` or ``required``. When ``none`` REST API will not check client certificates. When ``required`` client certificates are required for all REST API calls. When ``optional`` client certificates are required for all unsafe REST API endpoints. When ``required`` is used, then client authentication succeeds, if the certificate signature verification succeeds. For ``optional`` the client cert will only be checked for ``PUT``, ``POST``, ``PATCH``, and ``DELETE`` requests. - **allowlist**: (optional): Specifies the set of hosts that are allowed to call unsafe REST API endpoints. The single element could be a host name, an IP address or a network address using CIDR notation. By default ``allow all`` is used. In case if ``allowlist`` or ``allowlist_include_members`` are set, anything that is not included is rejected. - **allowlist\_include\_members**: (optional): If set to ``true`` it allows accessing unsafe REST API endpoints from other cluster members registered in DCS (IP address or hostname is taken from the members ``api_url``). Be careful, it might happen that OS will use a different IP for outgoing connections. - **http\_extra\_headers**: (optional): HTTP headers let the REST API server pass additional information with an HTTP response. 
- **https\_extra\_headers**: (optional): HTTPS headers let the REST API server pass additional information with an HTTP response when TLS is enabled. This will also pass additional information set in ``http_extra_headers``. - **request_queue_size**: (optional): Sets request queue size for TCP socket used by Patroni REST API. Once the queue is full, further requests get a "Connection denied" error. The default value is 5. Here is an example of both **http_extra_headers** and **https_extra_headers**: .. code:: YAML restapi: listen: connect_address: authentication: username: password: http_extra_headers: 'X-Frame-Options': 'SAMEORIGIN' 'X-XSS-Protection': '1; mode=block' 'X-Content-Type-Options': 'nosniff' cafile: certfile: keyfile: https_extra_headers: 'Strict-Transport-Security': 'max-age=31536000; includeSubDomains' .. warning:: - The ``restapi.connect_address`` must be accessible from all nodes of a given Patroni cluster. Internally Patroni is using it during the leader race to find nodes with minimal replication lag. - If you enabled client certificates validation (``restapi.verify_client`` is set to ``required``), you also **must** provide **valid client certificates** in the ``ctl.certfile``, ``ctl.keyfile``, ``ctl.keyfile_password``. If not provided, Patroni will not work correctly. .. _patronictl_settings: CTL --- - **ctl**: (optional) - **authentication**: - **username**: Basic-auth username for accessing protected REST API endpoints. If not provided :ref:`patronictl` will use the value provided for REST API "username" parameter. - **password**: Basic-auth password for accessing protected REST API endpoints. If not provided :ref:`patronictl` will use the value provided for REST API "password" parameter. - **insecure**: Allow connections to REST API without verifying SSL certs. - **cacert**: Specifies the file with the CA_BUNDLE file or directory with certificates of trusted CAs to use while verifying REST API SSL certs. 
If not provided :ref:`patronictl` will use the value provided for REST API "cafile" parameter. - **certfile**: Specifies the file with the client certificate in the PEM format. - **keyfile**: Specifies the file with the client secret key in the PEM format. - **keyfile\_password**: Specifies a password for decrypting the client keyfile. Watchdog -------- - **mode**: ``off``, ``automatic`` or ``required``. When ``off`` watchdog is disabled. When ``automatic`` watchdog will be used if available, but ignored if it is not. When ``required`` the node will not become a leader unless watchdog can be successfully enabled. - **device**: Path to watchdog device. Defaults to ``/dev/watchdog``. - **safety_margin**: Number of seconds of safety margin between watchdog triggering and leader key expiration. .. _tags_settings: Tags ---- - **clonefrom**: ``true`` or ``false``. If set to ``true`` other nodes might prefer to use this node for bootstrap (take ``pg_basebackup`` from). If there are several nodes with ``clonefrom`` tag set to ``true`` the node to bootstrap from will be chosen randomly. The default value is ``false``. - **noloadbalance**: ``true`` or ``false``. If set to ``true`` the node will return HTTP Status Code 503 for the ``GET /replica`` REST API health-check and therefore will be excluded from the load-balancing. Defaults to ``false``. - **replicatefrom**: The IP address/hostname of another replica. Used to support cascading replication. - **nosync**: ``true`` or ``false``. If set to ``true`` the node will never be selected as a synchronous replica. - **nofailover**: ``true`` or ``false``, controls whether this node is allowed to participate in the leader race and become a leader. Defaults to ``false``, meaning this node _can_ participate in leader races. - **failover_priority**: integer, controls the priority that this node should have during failover. 
Nodes with higher priority will be preferred over lower priority nodes if they received/replayed the same amount of WAL. However, nodes with higher values of receive/replay LSN are preferred regardless of their priority. If the ``failover_priority`` is 0 or negative - such node is not allowed to participate in the leader race and to become a leader (similar to ``nofailover: true``). .. warning:: Provide only one of ``nofailover`` or ``failover_priority``. Providing ``nofailover: true`` is the same as ``failover_priority: 0``, and providing ``nofailover: false`` will give the node priority 1. In addition to these predefined tags, you can also add your own ones: - **key1**: ``true`` - **key2**: ``false`` - **key3**: ``1.4`` - **key4**: ``"RandomString"`` Tags are visible in the :ref:`REST API ` and :ref:`patronictl_list` You can also check for an instance health using these tags. If the tag isn't defined for an instance, or if the respective value doesn't match the querying value, it will return HTTP Status Code 503. patroni-3.2.2/extras/000077500000000000000000000000001455170150700145055ustar00rootroot00000000000000patroni-3.2.2/extras/README.md000066400000000000000000000012271455170150700157660ustar00rootroot00000000000000### confd `confd` directory contains haproxy and pgbouncer template files for the [confd](https://github.com/kelseyhightower/confd) -- lightweight configuration management tool You need to copy content of `confd` directory into /etcd/confd and run confd service: ```bash $ confd -prefix=/service/$PATRONI_SCOPE -backend etcd -node $PATRONI_ETCD_URL -interval=10 ``` It will periodically update haproxy.cfg and pgbouncer.ini with the actual list of Patroni nodes from `etcd` and "reload" haproxy and pgbouncer.ini when it is necessary. ### startup-scripts `startup-scripts` directory contains startup scripts for various OSes and management tools for Patroni. 
patroni-3.2.2/extras/confd/000077500000000000000000000000001455170150700155765ustar00rootroot00000000000000patroni-3.2.2/extras/confd/conf.d/000077500000000000000000000000001455170150700167455ustar00rootroot00000000000000patroni-3.2.2/extras/confd/conf.d/haproxy.toml000066400000000000000000000004651455170150700213410ustar00rootroot00000000000000[template] #prefix = "/service/batman" #owner = "haproxy" #mode = "0644" src = "haproxy.tmpl" dest = "/etc/haproxy/haproxy.cfg" check_cmd = "/usr/sbin/haproxy -c -f {{ .src }}" reload_cmd = "haproxy -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid -D -sf $(cat /var/run/haproxy.pid)" keys = [ "/", ] patroni-3.2.2/extras/confd/conf.d/pgbouncer.toml000066400000000000000000000003241455170150700216250ustar00rootroot00000000000000[template] prefix = "/service/batman" owner = "postgres" mode = "0644" src = "pgbouncer.tmpl" dest = "/etc/pgbouncer/pgbouncer.ini" reload_cmd = "systemctl reload pgbouncer" keys = [ "/members/","/leader" ]patroni-3.2.2/extras/confd/templates/000077500000000000000000000000001455170150700175745ustar00rootroot00000000000000patroni-3.2.2/extras/confd/templates/haproxy-citus.tmpl000066400000000000000000000024121455170150700233100ustar00rootroot00000000000000global maxconn 100 defaults log global mode tcp retries 2 timeout client 30m timeout connect 4s timeout server 30m timeout check 5s listen stats mode http bind *:7000 stats enable stats uri / listen coordinator bind *:5000 option httpchk HEAD /primary http-check expect status 200 default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions {{range gets "/0/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} maxconn 100 check check-ssl port {{index (split (index (split $data.api_url "/") 2) ":") 1}} verify required ca-file /etc/ssl/certs/ssl-cert-snakeoil.pem crt /etc/ssl/private/ssl-cert-snakeoil.crt {{end}} listen workers bind *:5001 option httpchk HEAD /primary 
http-check expect status 200 default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions {{range gets "/*/members/*"}}{{$group := index (split .Key "/") 1}}{{if ne $group "0"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} maxconn 100 check check-ssl port {{index (split (index (split $data.api_url "/") 2) ":") 1}} verify required ca-file /etc/ssl/certs/ssl-cert-snakeoil.pem crt /etc/ssl/private/ssl-cert-snakeoil.crt {{end}}{{end}} patroni-3.2.2/extras/confd/templates/haproxy.tmpl000066400000000000000000000017361455170150700221730ustar00rootroot00000000000000global maxconn 100 defaults log global mode tcp retries 2 timeout client 30m timeout connect 4s timeout server 30m timeout check 5s listen stats mode http bind *:7000 stats enable stats uri / listen primary bind *:5000 option httpchk HEAD /primary http-check expect status 200 default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions {{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} maxconn 100 check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} {{end}} listen replicas bind *:5001 option httpchk HEAD /replica http-check expect status 200 default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions {{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} maxconn 100 check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} {{end}} patroni-3.2.2/extras/confd/templates/pgbouncer.tmpl000066400000000000000000000014071455170150700224600ustar00rootroot00000000000000[databases] {{with get "/leader"}}{{$leader := .Value}}{{$leadkey := printf "/members/%s" $leader}}{{with get $leadkey}}{{$data := json .Value}}{{$hostport := base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}}{{ $host := base (index (split 
$hostport ":") 0)}}{{ $port := base (index (split $hostport ":") 1)}}* = host={{ $host }} port={{ $port }} pool_size=10{{end}}{{end}} [pgbouncer] logfile = /var/log/postgresql/pgbouncer.log pidfile = /var/run/postgresql/pgbouncer.pid listen_addr = * listen_port = 6432 unix_socket_dir = /var/run/postgresql auth_type = trust auth_file = /etc/pgbouncer/userlist.txt auth_hba_file = /etc/pgbouncer/pg_hba.txt admin_users = pgbouncer stats_users = pgbouncer pool_mode = session max_client_conn = 100 default_pool_size = 20 patroni-3.2.2/extras/startup-scripts/000077500000000000000000000000001455170150700176745ustar00rootroot00000000000000patroni-3.2.2/extras/startup-scripts/README.md000066400000000000000000000021441455170150700211540ustar00rootroot00000000000000# startup scripts for Patroni This directory contains sample startup scripts for various OSes and management tools for Patroni. Scripts supplied: ### patroni.upstart.conf Upstart job for Ubuntu 12.04 or 14.04. Requires Upstart > 1.4. Intended for systems where Patroni has been installed on a base system, rather than in Docker. ### patroni.service Systemd service file, to be copied to /etc/systemd/system/patroni.service, tested on Centos 7.1 with Patroni installed from pip. ### patroni Init.d service file for Debian-like distributions. Copy it to /etc/init.d/, make executable: ```chmod 755 /etc/init.d/patroni``` and run with ```service patroni start```, or make it starting on boot with ```update-rc.d patroni defaults```. Also you might edit some configuration variables in it: PATRONI for patroni.py location CONF for configuration file LOGFILE for log (script creates it if does not exist) Note. If you have several versions of Postgres installed, please add to POSTGRES_VERSION the release number which you wish to run. Script uses this value to append PATH environment with correct path to Postgres bin. 
patroni-3.2.2/extras/startup-scripts/patroni000066400000000000000000000065311455170150700213000ustar00rootroot00000000000000#!/bin/sh # ### BEGIN INIT INFO # Provides: patroni # Required-Start: $remote_fs $syslog # Required-Stop: $remote_fs $syslog # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Patroni init script # Description: Runners to orchestrate a high-availability PostgreSQL ### END INIT INFO ### BEGIN USER CONFIGURATION CONF="/etc/patroni/postgres.yml" LOGFILE="/var/log/patroni.log" USER="postgres" GROUP="postgres" NAME=patroni PATRONI="/opt/patroni/$NAME.py" PIDFILE="/var/run/$NAME.pid" # Set this parameter, if you have several Postgres versions installed # POSTGRES_VERSION="9.4" POSTGRES_VERSION="" ### END USER CONFIGURATION . /lib/lsb/init-functions # Loading this library for get_versions() function if test ! -e /usr/share/postgresql-common/init.d-functions; then log_failure_msg "Probably postgresql-common does not installed." exit 1 else . /usr/share/postgresql-common/init.d-functions fi # Is there Patroni executable? if test ! -e $PATRONI; then log_failure_msg "Patroni executable $PATRONI does not exist." exit 1 fi # Is there Patroni configuration file? if test ! -e $CONF; then log_failure_msg "Patroni configuration file $CONF does not exist." exit 1 fi # Create logfile if doesn't exist if test ! -e $LOGFILE; then log_action_msg "Creating logfile for Patroni..." touch $LOGFILE chown $USER:$GROUP $LOGFILE fi prepare_pgpath() { if [ "$POSTGRES_VERSION" != "" ]; then if [ -x /usr/lib/postgresql/$POSTGRES_VERSION/bin/pg_ctl ]; then PGPATH="/usr/lib/postgresql/$POSTGRES_VERSION/bin" else log_failure_msg "Postgres version incorrect, check POSTGRES_VERSION variable." exit 0 fi else get_versions if echo $versions | grep -q -e "\s"; then log_warning_msg "You have several Postgres versions installed. Please, use POSTGRES_VERSION to define correct environment." 
else versions=`echo $versions | sed -e 's/^[ \t]*//'` PGPATH="/usr/lib/postgresql/$versions/bin" fi fi } get_pid() { if test -e $PIDFILE; then PID=`cat $PIDFILE` CHILDPID=`ps --ppid $PID -o %p --no-headers` else log_failure_msg "Could not find PID file. Patroni probably down." exit 1 fi } case "$1" in start) prepare_pgpath PGPATH=$PATH:$PGPATH log_success_msg "Starting Patroni\n" exec start-stop-daemon --start --quiet \ --background \ --pidfile $PIDFILE --make-pidfile \ --chuid $USER:$GROUP \ --chdir `eval echo ~$USER` \ --exec $PATRONI \ --startas /bin/sh -- \ -c "/usr/bin/env PATH=$PGPATH /usr/bin/python $PATRONI $CONF >> $LOGFILE 2>&1" ;; stop) log_success_msg "Stopping Patroni" get_pid start-stop-daemon --stop --pid $CHILDPID start-stop-daemon --stop --pidfile $PIDFILE --remove-pidfile --quiet ;; reload) log_success_msg "Reloading Patroni configuration" get_pid kill -HUP $CHILDPID ;; status) get_pid if start-stop-daemon -T --pid $CHILDPID; then log_success_msg "Patroni is running\n" exit 0 else log_warning_msg "Patroni in not running\n" fi ;; restart) $0 stop $0 start ;; *) echo "Usage: /etc/init.d/$NAME {start|stop|restart|reload|status}" exit 1 ;; esac if [ $? -eq 0 ]; then echo . exit 0 else echo " failed" exit 1 fi patroni-3.2.2/extras/startup-scripts/patroni.service000066400000000000000000000024351455170150700227360ustar00rootroot00000000000000# This is an example systemd config file for Patroni # You can copy it to "/etc/systemd/system/patroni.service", [Unit] Description=Runners to orchestrate a high-availability PostgreSQL After=syslog.target network.target [Service] Type=simple User=postgres Group=postgres # Read in configuration file if it exists, otherwise proceed EnvironmentFile=-/etc/patroni_env.conf # The default is the user's home directory, and if you want to change it, you must provide an absolute path. 
# WorkingDirectory=/home/sameuser # Where to send early-startup messages from the server # This is normally controlled by the global default set by systemd #StandardOutput=syslog # Pre-commands to start watchdog device # Uncomment if watchdog is part of your patroni setup #ExecStartPre=-/usr/bin/sudo /sbin/modprobe softdog #ExecStartPre=-/usr/bin/sudo /bin/chown postgres /dev/watchdog # Start the patroni process ExecStart=/bin/patroni /etc/patroni.yml # Send HUP to reload from patroni.yml ExecReload=/bin/kill -s HUP $MAINPID # Only kill the patroni process, not it's children, so it will gracefully stop postgres KillMode=process # Give a reasonable amount of time for the server to start up/shut down TimeoutSec=30 # Restart the service if it crashed Restart=on-failure [Install] WantedBy=multi-user.target patroni-3.2.2/extras/startup-scripts/patroni.upstart.conf000066400000000000000000000014641455170150700237250ustar00rootroot00000000000000# patroni - patroni daemon # # controls startup/shutdown of postgres # you should disable any postgres start jobs # # assumes that patroni has been installed into the # pythonpath by using setup.py install description "patroni start daemon" start on net-device-up stop on runlevel [06] respawn respawn limit 5 10 # set location of patroni env PATRONI=/usr/local/bin/patroni # virtualenv example # env PATRONI=/var/lib/postgresql/patronienv/bin/patroni # set location of config file env PATRONICONF=/etc/patroni/patroni.yml # set log dir for patroni logs # postgres user must have write permission env POSTGRESLOGDIR=/var/log/postgresql setuid postgres setgid postgres script exec start-stop-daemon --start \ --exec $PATRONI -- $PATRONICONF \ >> $POSTGRESLOGDIR/patroni.log 2>&1 end script patroni-3.2.2/features/000077500000000000000000000000001455170150700150155ustar00rootroot00000000000000patroni-3.2.2/features/Dockerfile000066400000000000000000000052771455170150700170220ustar00rootroot00000000000000# syntax = docker/dockerfile:1.5 # Used 
only for running tests using tox, see ../tox.ini ARG PG_MAJOR ARG PGHOME=/home/postgres ARG LC_ALL=C.UTF-8 ARG LANG=C.UTF-8 ARG BASE_IMAGE=postgres FROM ${BASE_IMAGE}:${PG_MAJOR} ARG PGHOME ARG LC_ALL ARG LANG ENV PGHOME="$PGHOME" ENV PG_USER="${PG_USER:-postgres}" ENV PG_GROUP="${PG_GROUP:-$PG_USER}" ENV LC_ALL="$LC_ALL" ENV LANG="$LANG" ARG ETCDVERSION=3.3.13 ENV ETCDVERSION="$ETCDVERSION" ARG ETCDURL="https://github.com/coreos/etcd/releases/download/v$ETCDVERSION" USER root RUN set -ex \ && apt-get update \ && apt-get reinstall init-system-helpers \ && apt-get install -y \ python3-dev \ python3-venv \ rsync \ curl \ gcc \ golang \ jq \ locales \ sudo \ busybox \ net-tools \ iputils-ping \ && rm -rf /var/cache/apt \ \ && python3 -m venv /tox \ && /tox/bin/pip install --no-cache-dir tox>=4 \ \ && mkdir -p "$PGHOME" \ && sed -i "s|/var/lib/postgresql.*|$PGHOME:/bin/bash|" /etc/passwd \ && chown -R "$PG_USER:$PG_GROUP" /var/log /home/postgres \ \ # Download etcd \ && curl -sL "$ETCDURL/etcd-v$ETCDVERSION-linux-$(dpkg --print-architecture).tar.gz" \ | tar xz -C /usr/local/bin --strip=1 --wildcards --no-anchored etcd etcdctl ENV PATH="/tox/bin:$PATH" # This Dockerfile syntax only works with docker buildx and the syntax # line at the top of this file. COPY </dev/null \\ | sed 's|^./||' >/tmp/copy_exclude.lst \\ || true runuser -u "\$PG_USER" -- \\ rsync -a \\ --exclude=.tox \\ --exclude="features/output*" \\ --exclude-from="/tmp/copy_exclude.lst" \\ . "\$PGHOME/src/" cd "\$PGHOME/src" runuser -u "\$PG_USER" -w ETCD_UNSUPPORTED_ARCH -- "\$@" & wait $! 
# SIGINT whilst child proc is running is not seen by trap so we run a copy here instead of using # trap copy_output SIGINT EXIT copy_output EOF RUN chmod +x /tox-wrapper.sh VOLUME /src ENTRYPOINT ["/tox-wrapper.sh"] patroni-3.2.2/features/archive-restore.py000066400000000000000000000013501455170150700204700ustar00rootroot00000000000000#!/usr/bin/env python import os import argparse import shutil if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dirname", required=True) parser.add_argument("--pathname", required=True) parser.add_argument("--filename", required=True) parser.add_argument("--mode", required=True, choices=("archive", "restore")) args, _ = parser.parse_known_args() full_filename = os.path.join(args.dirname, args.filename) if args.mode == "archive": if not os.path.isdir(args.dirname): os.makedirs(args.dirname) if not os.path.exists(full_filename): shutil.copy(args.pathname, full_filename) else: shutil.copy(full_filename, args.pathname) patroni-3.2.2/features/backup_create.py000077500000000000000000000010561455170150700201640ustar00rootroot00000000000000#!/usr/bin/env python import argparse import subprocess import sys if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--datadir", required=True) parser.add_argument("--dbname", required=True) parser.add_argument("--walmethod", required=True, choices=("fetch", "stream", "none")) args, _ = parser.parse_known_args() walmethod = ["-X", args.walmethod] if args.walmethod != "none" else [] sys.exit(subprocess.call(["pg_basebackup", "-D", args.datadir, "-c", "fast", "-d", args.dbname] + walmethod)) patroni-3.2.2/features/backup_restore.py000077500000000000000000000005661455170150700204110ustar00rootroot00000000000000#!/usr/bin/env python import argparse import shutil if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--datadir", required=True) parser.add_argument("--sourcedir", required=True) 
parser.add_argument("--test-argument", required=True) args, _ = parser.parse_known_args() shutil.copytree(args.sourcedir, args.datadir) patroni-3.2.2/features/basic_replication.feature000066400000000000000000000111761455170150700220520ustar00rootroot00000000000000Feature: basic replication We should check that the basic bootstrapping, replication and failover works. Scenario: check replication of a single table Given I start postgres0 Then postgres0 is a leader after 10 seconds And there is a non empty initialize key in DCS after 15 seconds When I issue a PATCH request to http://127.0.0.1:8008/config with {"ttl": 20, "synchronous_mode": true} Then I receive a response code 200 When I start postgres1 And I configure and start postgres2 with a tag replicatefrom postgres0 And "sync" key in DCS has leader=postgres0 after 20 seconds And I add the table foo to postgres0 Then table foo is present on postgres1 after 20 seconds Then table foo is present on postgres2 after 20 seconds Scenario: check restart of sync replica Given I shut down postgres2 Then "sync" key in DCS has sync_standby=postgres1 after 5 seconds When I start postgres2 And I shut down postgres1 Then "sync" key in DCS has sync_standby=postgres2 after 10 seconds When I start postgres1 Then "members/postgres1" key in DCS has state=running after 10 seconds And Status code on GET http://127.0.0.1:8010/sync is 200 after 3 seconds And Status code on GET http://127.0.0.1:8009/async is 200 after 3 seconds Scenario: check stuck sync replica Given I issue a PATCH request to http://127.0.0.1:8008/config with {"pause": true, "maximum_lag_on_syncnode": 15000000, "postgresql": {"parameters": {"synchronous_commit": "remote_apply"}}} Then I receive a response code 200 And I create table on postgres0 And table mytest is present on postgres1 after 2 seconds And table mytest is present on postgres2 after 2 seconds When I pause wal replay on postgres2 And I load data on postgres0 Then "sync" key in DCS has 
sync_standby=postgres1 after 15 seconds And I resume wal replay on postgres2 And Status code on GET http://127.0.0.1:8009/sync is 200 after 3 seconds And Status code on GET http://127.0.0.1:8010/async is 200 after 3 seconds When I issue a PATCH request to http://127.0.0.1:8008/config with {"pause": null, "maximum_lag_on_syncnode": -1, "postgresql": {"parameters": {"synchronous_commit": "on"}}} Then I receive a response code 200 And I drop table on postgres0 Scenario: check multi sync replication Given I issue a PATCH request to http://127.0.0.1:8008/config with {"synchronous_node_count": 2} Then I receive a response code 200 Then "sync" key in DCS has sync_standby=postgres1,postgres2 after 10 seconds And Status code on GET http://127.0.0.1:8010/sync is 200 after 3 seconds And Status code on GET http://127.0.0.1:8009/sync is 200 after 3 seconds When I issue a PATCH request to http://127.0.0.1:8008/config with {"synchronous_node_count": 1} Then I receive a response code 200 And I shut down postgres1 Then "sync" key in DCS has sync_standby=postgres2 after 10 seconds When I start postgres1 Then "members/postgres1" key in DCS has state=running after 10 seconds And Status code on GET http://127.0.0.1:8010/sync is 200 after 3 seconds And Status code on GET http://127.0.0.1:8009/async is 200 after 3 seconds Scenario: check the basic failover in synchronous mode Given I run patronictl.py pause batman Then I receive a response returncode 0 When I sleep for 2 seconds And I shut down postgres0 And I run patronictl.py resume batman Then I receive a response returncode 0 And postgres2 role is the primary after 24 seconds And Response on GET http://127.0.0.1:8010/history contains recovery after 10 seconds And there is a postgres2_cb.log with "on_role_change master batman" in postgres2 data directory When I issue a PATCH request to http://127.0.0.1:8010/config with {"synchronous_mode": null, "master_start_timeout": 0} Then I receive a response code 200 When I add the table bar to 
postgres2 Then table bar is present on postgres1 after 20 seconds And Response on GET http://127.0.0.1:8010/config contains master_start_timeout after 10 seconds Scenario: check rejoin of the former primary with pg_rewind Given I add the table splitbrain to postgres0 And I start postgres0 Then postgres0 role is the secondary after 20 seconds When I add the table buz to postgres2 Then table buz is present on postgres0 after 20 seconds @reject-duplicate-name Scenario: check graceful rejection when two nodes have the same name Given I start duplicate postgres0 on port 8011 Then there is one of ["Can't start; there is already a node named 'postgres0' running"] CRITICAL in the dup-postgres0 patroni log after 5 seconds patroni-3.2.2/features/callback2.py000077500000000000000000000002231455170150700172050ustar00rootroot00000000000000#!/usr/bin/env python import sys with open("data/{0}/{0}_cb.log".format(sys.argv[1]), "a+") as log: log.write(" ".join(sys.argv[-3:]) + "\n") patroni-3.2.2/features/cascading_replication.feature000066400000000000000000000014301455170150700226750ustar00rootroot00000000000000Feature: cascading replication We should check that patroni can do base backup and streaming from the replica Scenario: check a base backup and streaming replication from a replica Given I start postgres0 And postgres0 is a leader after 10 seconds And I configure and start postgres1 with a tag clonefrom true And replication works from postgres0 to postgres1 after 20 seconds And I create label with "postgres0" in postgres0 data directory And I create label with "postgres1" in postgres1 data directory And "members/postgres1" key in DCS has state=running after 12 seconds And I configure and start postgres2 with a tag replicatefrom postgres1 Then replication works from postgres0 to postgres2 after 30 seconds And there is a label with "postgres1" in postgres2 data directory 
patroni-3.2.2/features/citus.feature000066400000000000000000000105361455170150700175260ustar00rootroot00000000000000Feature: citus We should check that coordinator discovers and registers workers and clients don't have errors when worker cluster switches over Scenario: check that worker cluster is registered in the coordinator Given I start postgres0 in citus group 0 And I start postgres2 in citus group 1 Then postgres0 is a leader in a group 0 after 10 seconds And postgres2 is a leader in a group 1 after 10 seconds When I start postgres1 in citus group 0 And I start postgres3 in citus group 1 Then replication works from postgres0 to postgres1 after 15 seconds Then replication works from postgres2 to postgres3 after 15 seconds And postgres0 is registered in the postgres0 as the primary in group 0 after 5 seconds And postgres2 is registered in the postgres0 as the primary in group 1 after 5 seconds Scenario: coordinator failover updates pg_dist_node Given I run patronictl.py failover batman --group 0 --candidate postgres1 --force Then postgres1 role is the primary after 10 seconds And "members/postgres0" key in a group 0 in DCS has state=running after 15 seconds And replication works from postgres1 to postgres0 after 15 seconds And postgres1 is registered in the postgres2 as the primary in group 0 after 5 seconds And "sync" key in a group 0 in DCS has sync_standby=postgres0 after 15 seconds When I run patronictl.py switchover batman --group 0 --candidate postgres0 --force Then postgres0 role is the primary after 10 seconds And replication works from postgres0 to postgres1 after 15 seconds And postgres0 is registered in the postgres2 as the primary in group 0 after 5 seconds And "sync" key in a group 0 in DCS has sync_standby=postgres1 after 15 seconds Scenario: worker switchover doesn't break client queries on the coordinator Given I create a distributed table on postgres0 And I start a thread inserting data on postgres0 When I run patronictl.py switchover batman 
--group 1 --force Then I receive a response returncode 0 And postgres3 role is the primary after 10 seconds And "members/postgres2" key in a group 1 in DCS has state=running after 15 seconds And replication works from postgres3 to postgres2 after 15 seconds And postgres3 is registered in the postgres0 as the primary in group 1 after 5 seconds And "sync" key in a group 1 in DCS has sync_standby=postgres2 after 15 seconds And a thread is still alive When I run patronictl.py switchover batman --group 1 --force Then I receive a response returncode 0 And postgres2 role is the primary after 10 seconds And replication works from postgres2 to postgres3 after 15 seconds And postgres2 is registered in the postgres0 as the primary in group 1 after 5 seconds And "sync" key in a group 1 in DCS has sync_standby=postgres3 after 15 seconds And a thread is still alive When I stop a thread Then a distributed table on postgres0 has expected rows Scenario: worker primary restart doesn't break client queries on the coordinator Given I cleanup a distributed table on postgres0 And I start a thread inserting data on postgres0 When I run patronictl.py restart batman postgres2 --group 1 --force Then I receive a response returncode 0 And postgres2 role is the primary after 10 seconds And replication works from postgres2 to postgres3 after 15 seconds And postgres2 is registered in the postgres0 as the primary in group 1 after 5 seconds And a thread is still alive When I stop a thread Then a distributed table on postgres0 has expected rows Scenario: check that in-flight transaction is rolled back after timeout when other workers need to change pg_dist_node Given I start postgres4 in citus group 2 Then postgres4 is a leader in a group 2 after 10 seconds And "members/postgres4" key in a group 2 in DCS has role=master after 3 seconds When I run patronictl.py edit-config batman --group 2 -s ttl=20 --force Then I receive a response returncode 0 And I receive a response output "+ttl: 20" Then 
postgres4 is registered in the postgres2 as the primary in group 2 after 5 seconds When I shut down postgres4 Then there is a transaction in progress on postgres0 changing pg_dist_node after 5 seconds When I run patronictl.py restart batman postgres2 --group 1 --force Then a transaction finishes in 20 seconds patroni-3.2.2/features/custom_bootstrap.feature000066400000000000000000000014141455170150700220010ustar00rootroot00000000000000Feature: custom bootstrap We should check that patroni can bootstrap a new cluster from a backup Scenario: clone existing cluster using pg_basebackup Given I start postgres0 Then postgres0 is a leader after 10 seconds When I add the table foo to postgres0 And I start postgres1 in a cluster batman1 as a clone of postgres0 Then postgres1 is a leader of batman1 after 10 seconds Then table foo is present on postgres1 after 10 seconds Scenario: make a backup and do a restore into a new cluster Given I add the table bar to postgres1 And I do a backup of postgres1 When I start postgres2 in a cluster batman2 from backup Then postgres2 is a leader of batman2 after 30 seconds And table bar is present on postgres2 after 10 seconds patroni-3.2.2/features/dcs_failsafe_mode.feature000066400000000000000000000144611455170150700220070ustar00rootroot00000000000000Feature: dcs failsafe mode We should check the basic dcs failsafe mode functioning Scenario: check failsafe mode can be successfully enabled Given I start postgres0 And postgres0 is a leader after 10 seconds Then "config" key in DCS has ttl=30 after 10 seconds When I issue a PATCH request to http://127.0.0.1:8008/config with {"loop_wait": 2, "ttl": 20, "retry_timeout": 3, "failsafe_mode": true} Then I receive a response code 200 And Response on GET http://127.0.0.1:8008/failsafe contains postgres0 after 10 seconds When I issue a GET request to http://127.0.0.1:8008/failsafe Then I receive a response code 200 And I receive a response postgres0 http://127.0.0.1:8008/patroni When I issue a PATCH 
request to http://127.0.0.1:8008/config with {"postgresql": {"parameters": {"wal_level": "logical"}},"slots":{"dcs_slot_1": null,"postgres0":null}} Then I receive a response code 200 When I issue a PATCH request to http://127.0.0.1:8008/config with {"slots": {"dcs_slot_0": {"type": "logical", "database": "postgres", "plugin": "test_decoding"}}} Then I receive a response code 200 @dcs-failsafe Scenario: check one-node cluster is functioning while DCS is down Given DCS is down Then Response on GET http://127.0.0.1:8008/primary contains failsafe_mode_is_active after 12 seconds And postgres0 role is the primary after 10 seconds @dcs-failsafe Scenario: check new replica isn't promoted when leader is down and DCS is up Given DCS is up When I do a backup of postgres0 And I shut down postgres0 When I start postgres1 in a cluster batman from backup with no_leader Then postgres1 role is the replica after 12 seconds Scenario: check leader and replica are both in /failsafe key after leader is back Given I start postgres0 And I start postgres1 Then "members/postgres0" key in DCS has state=running after 10 seconds And "members/postgres1" key in DCS has state=running after 2 seconds And Response on GET http://127.0.0.1:8009/failsafe contains postgres1 after 10 seconds When I issue a GET request to http://127.0.0.1:8009/failsafe Then I receive a response code 200 And I receive a response postgres0 http://127.0.0.1:8008/patroni And I receive a response postgres1 http://127.0.0.1:8009/patroni @dcs-failsafe @slot-advance Scenario: check leader and replica are functioning while DCS is down Given I get all changes from physical slot dcs_slot_1 on postgres0 Then physical slot dcs_slot_1 is in sync between postgres0 and postgres1 after 10 seconds And logical slot dcs_slot_0 is in sync between postgres0 and postgres1 after 10 seconds And DCS is down Then Response on GET http://127.0.0.1:8008/primary contains failsafe_mode_is_active after 12 seconds Then postgres0 role is the primary after 
10 seconds And postgres1 role is the replica after 2 seconds And replication works from postgres0 to postgres1 after 10 seconds When I get all changes from logical slot dcs_slot_0 on postgres0 And I get all changes from physical slot dcs_slot_1 on postgres0 Then logical slot dcs_slot_0 is in sync between postgres0 and postgres1 after 20 seconds And physical slot dcs_slot_1 is in sync between postgres0 and postgres1 after 10 seconds @dcs-failsafe Scenario: check primary is demoted when one replica is shut down and DCS is down Given DCS is down And I kill postgres1 And I kill postmaster on postgres1 Then postgres0 role is the replica after 12 seconds @dcs-failsafe Scenario: check known replica is promoted when leader is down and DCS is up Given I kill postgres0 And I shut down postmaster on postgres0 And DCS is up When I start postgres1 Then "members/postgres1" key in DCS has state=running after 10 seconds And postgres1 role is the primary after 25 seconds @dcs-failsafe Scenario: scale to three-node cluster Given I start postgres0 And I start postgres2 Then "members/postgres2" key in DCS has state=running after 10 seconds And "members/postgres0" key in DCS has state=running after 20 seconds And Response on GET http://127.0.0.1:8008/failsafe contains postgres2 after 10 seconds And replication works from postgres1 to postgres0 after 10 seconds And replication works from postgres1 to postgres2 after 10 seconds @dcs-failsafe @slot-advance Scenario: make sure permanent slots exist on replicas Given I issue a PATCH request to http://127.0.0.1:8009/config with {"slots":{"dcs_slot_0":null,"dcs_slot_2":{"type":"logical","database":"postgres","plugin":"test_decoding"}}} Then logical slot dcs_slot_2 is in sync between postgres1 and postgres0 after 20 seconds And logical slot dcs_slot_2 is in sync between postgres1 and postgres2 after 20 seconds When I get all changes from physical slot dcs_slot_1 on postgres1 Then physical slot dcs_slot_1 is in sync between postgres1 and 
postgres0 after 10 seconds And physical slot dcs_slot_1 is in sync between postgres1 and postgres2 after 10 seconds And physical slot postgres0 is in sync between postgres1 and postgres2 after 10 seconds @dcs-failsafe Scenario: check three-node cluster is functioning while DCS is down Given DCS is down Then Response on GET http://127.0.0.1:8009/primary contains failsafe_mode_is_active after 12 seconds Then postgres1 role is the primary after 10 seconds And postgres0 role is the replica after 2 seconds And postgres2 role is the replica after 2 seconds @dcs-failsafe @slot-advance Scenario: check that permanent slots are in sync between nodes while DCS is down Given replication works from postgres1 to postgres0 after 10 seconds And replication works from postgres1 to postgres2 after 10 seconds When I get all changes from logical slot dcs_slot_2 on postgres1 And I get all changes from physical slot dcs_slot_1 on postgres1 Then logical slot dcs_slot_2 is in sync between postgres1 and postgres0 after 20 seconds And logical slot dcs_slot_2 is in sync between postgres1 and postgres2 after 20 seconds And physical slot dcs_slot_1 is in sync between postgres1 and postgres0 after 10 seconds And physical slot dcs_slot_1 is in sync between postgres1 and postgres2 after 10 seconds And physical slot postgres0 is in sync between postgres1 and postgres2 after 10 seconds patroni-3.2.2/features/environment.py000066400000000000000000001325221455170150700177400ustar00rootroot00000000000000import abc import datetime import glob import os import json import psutil import re import shutil import signal import stat import subprocess import sys import tempfile import threading import time import yaml import patroni.psycopg as psycopg from http.server import BaseHTTPRequestHandler, HTTPServer from patroni.request import PatroniRequest class AbstractController(abc.ABC): def __init__(self, context, name, work_directory, output_dir): self._context = context self._name = name self._work_directory 
= work_directory self._output_dir = output_dir self._handle = None self._log = None def _has_started(self): return self._handle and self._handle.pid and self._handle.poll() is None def _is_running(self): return self._has_started() @abc.abstractmethod def _is_accessible(self): """process is accessible for queries""" @abc.abstractmethod def _start(self): """start process""" def start(self, max_wait_limit=5): if self._is_running(): return True self._log = open(os.path.join(self._output_dir, self._name + '.log'), 'a') self._handle = self._start() max_wait_limit *= self._context.timeout_multiplier for _ in range(max_wait_limit): assert self._has_started(), "Process {0} is not running after being started".format(self._name) if self._is_accessible(): break time.sleep(1) else: assert False, \ "{0} instance is not available for queries after {1} seconds".format(self._name, max_wait_limit) def stop(self, kill=False, timeout=15, _=False): term = False start_time = time.time() timeout *= self._context.timeout_multiplier while self._handle and self._is_running(): if kill: self._handle.kill() elif not term: self._handle.terminate() term = True time.sleep(1) if not kill and time.time() - start_time > timeout: kill = True if self._log: self._log.close() def cancel_background(self): pass class PatroniController(AbstractController): __PORT = 5360 PATRONI_CONFIG = '{}.yml' """ starts and stops individual patronis""" def __init__(self, context, name, work_directory, output_dir, custom_config=None): super(PatroniController, self).__init__(context, 'patroni_' + name, work_directory, output_dir) PatroniController.__PORT += 1 self._data_dir = os.path.join(work_directory, 'data', name) self._connstring = None if custom_config and 'watchdog' in custom_config: self.watchdog = WatchdogMonitor(name, work_directory, output_dir) custom_config['watchdog'] = {'driver': 'testing', 'device': self.watchdog.fifo_path, 'mode': 'required'} else: self.watchdog = None self._scope = (custom_config or 
{}).get('scope', 'batman') self._citus_group = (custom_config or {}).get('citus', {}).get('group') self._config = self._make_patroni_test_config(name, custom_config) self._closables = [] self._conn = None self._curs = None def write_label(self, content): with open(os.path.join(self._data_dir, 'label'), 'w') as f: f.write(content) def read_label(self, label): try: with open(os.path.join(self._data_dir, label), 'r') as f: return f.read().strip() except IOError: return None @staticmethod def recursive_update(dst, src): for k, v in src.items(): if k in dst and isinstance(dst[k], dict): PatroniController.recursive_update(dst[k], v) else: dst[k] = v def update_config(self, custom_config): with open(self._config) as r: config = yaml.safe_load(r) self.recursive_update(config, custom_config) with open(self._config, 'w') as w: yaml.safe_dump(config, w, default_flow_style=False) self._scope = config.get('scope', 'batman') def add_tag_to_config(self, tag, value): self.update_config({'tags': {tag: value}}) def _start(self): if self.watchdog: self.watchdog.start() env = os.environ.copy() if isinstance(self._context.dcs_ctl, KubernetesController): self._context.dcs_ctl.create_pod(self._name[8:], self._scope, self._citus_group) env['PATRONI_KUBERNETES_POD_IP'] = '10.0.0.' 
+ self._name[-1] if os.name == 'nt': env['BEHAVE_DEBUG'] = 'true' patroni = subprocess.Popen([sys.executable, '-m', 'coverage', 'run', '--source=patroni', '-p', 'patroni.py', self._config], env=env, stdout=self._log, stderr=subprocess.STDOUT, cwd=self._work_directory) if os.name == 'nt': patroni.terminate = self.terminate return patroni def terminate(self): try: self._context.request_executor.request('POST', self._restapi_url + '/sigterm') except Exception: pass def stop(self, kill=False, timeout=15, postgres=False): if postgres: mode = 'i' if kill else 'f' return subprocess.call(['pg_ctl', '-D', self._data_dir, 'stop', '-m' + mode, '-w']) super(PatroniController, self).stop(kill, timeout) if isinstance(self._context.dcs_ctl, KubernetesController) and not kill: self._context.dcs_ctl.delete_pod(self._name[8:]) if self.watchdog: self.watchdog.stop() def _is_accessible(self): cursor = self.query("SELECT 1", fail_ok=True) if cursor is not None: cursor.execute("SET synchronous_commit TO 'local'") return True def _make_patroni_test_config(self, name, custom_config): patroni_config_name = self.PATRONI_CONFIG.format(name) patroni_config_path = os.path.join(self._output_dir, patroni_config_name) with open('postgres0.yml') as f: config = yaml.safe_load(f) config.pop('etcd', None) raft_port = os.environ.get('RAFT_PORT') # If patroni_raft_controller is suspended two Patroni members is enough to get a quorum, # therefore we don't want Patroni to join as a voting member when testing dcs_failsafe_mode. 
if raft_port and not self._output_dir.endswith('dcs_failsafe_mode'): os.environ['RAFT_PORT'] = str(int(raft_port) + 1) config['raft'] = {'data_dir': self._output_dir, 'self_addr': 'localhost:' + os.environ['RAFT_PORT']} host = config['restapi']['listen'].rsplit(':', 1)[0] config['restapi']['listen'] = config['restapi']['connect_address'] = '{}:{}'.format(host, 8008 + int(name[-1])) host = config['postgresql']['listen'].rsplit(':', 1)[0] config['postgresql']['listen'] = config['postgresql']['connect_address'] = '{0}:{1}'.format(host, self.__PORT) config['name'] = name config['postgresql']['data_dir'] = self._data_dir.replace('\\', '/') config['postgresql']['basebackup'] = [{'checkpoint': 'fast'}] config['postgresql']['callbacks'] = { 'on_role_change': '{0} features/callback2.py {1}'.format(self._context.pctl.PYTHON, name)} config['postgresql']['use_unix_socket'] = os.name != 'nt' # windows doesn't yet support unix-domain sockets config['postgresql']['use_unix_socket_repl'] = os.name != 'nt' config['postgresql']['pgpass'] = os.path.join(tempfile.gettempdir(), 'pgpass_' + name).replace('\\', '/') config['postgresql']['parameters'].update({ 'logging_collector': 'on', 'log_destination': 'csvlog', 'log_directory': self._output_dir.replace('\\', '/'), 'log_filename': name + '.log', 'log_statement': 'all', 'log_min_messages': 'debug1', 'shared_buffers': '1MB', 'unix_socket_directories': tempfile.gettempdir().replace('\\', '/')}) config['postgresql']['pg_hba'] = [ 'local all all trust', 'local replication all trust', 'host replication replicator all md5', 'host all all all md5' ] if self._context.postgres_supports_ssl and self._context.certfile: config['postgresql']['parameters'].update({ 'ssl': 'on', 'ssl_ca_file': self._context.certfile.replace('\\', '/'), 'ssl_cert_file': self._context.certfile.replace('\\', '/'), 'ssl_key_file': self._context.keyfile.replace('\\', '/') }) for user in config['postgresql'].get('authentication').keys(): 
config['postgresql'].get('authentication', {}).get(user, {}).update({ 'sslmode': 'verify-ca', 'sslrootcert': self._context.certfile, 'sslcert': self._context.certfile, 'sslkey': self._context.keyfile }) for i, line in enumerate(list(config['postgresql']['pg_hba'])): if line.endswith('md5'): # we want to verify client cert first and than password config['postgresql']['pg_hba'][i] = 'hostssl' + line[4:] + ' clientcert=verify-ca' if 'bootstrap' in config: config['bootstrap']['post_bootstrap'] = 'psql -w -c "SELECT 1"' if 'initdb' in config['bootstrap']: config['bootstrap']['initdb'].extend([{'auth': 'md5'}, {'auth-host': 'md5'}]) if custom_config is not None: self.recursive_update(config, custom_config) self.recursive_update(config, { 'log': { 'format': '%(asctime)s %(levelname)s [%(pathname)s:%(lineno)d - %(funcName)s]: %(message)s', 'loggers': {'patroni.postgresql.callback_executor': 'DEBUG'} }, 'bootstrap': { 'dcs': { 'loop_wait': 2, 'postgresql': { 'parameters': { 'wal_keep_segments': 100, 'archive_mode': 'on', 'archive_command': (PatroniPoolController.ARCHIVE_RESTORE_SCRIPT + ' --mode archive ' + '--dirname {} --filename %f --pathname %p').format( os.path.join(self._work_directory, 'data', 'wal_archive')) } } } } }) if config['postgresql'].get('callbacks', {}).get('on_role_change'): config['postgresql']['callbacks']['on_role_change'] += ' ' + str(self.__PORT) with open(patroni_config_path, 'w') as f: yaml.safe_dump(config, f, default_flow_style=False) self._connkwargs = config['postgresql'].get('authentication', config['postgresql']).get('superuser', {}) self._connkwargs.update({'host': host, 'port': self.__PORT, 'dbname': 'postgres', 'user': self._connkwargs.pop('username', None)}) self._replication = config['postgresql'].get('authentication', config['postgresql']).get('replication', {}) self._replication.update({'host': host, 'port': self.__PORT, 'user': self._replication.pop('username', None)}) self._restapi_url = 
'http://{0}'.format(config['restapi']['connect_address']) if self._context.certfile: self._restapi_url = self._restapi_url.replace('http://', 'https://') return patroni_config_path def _connection(self): if not self._conn or self._conn.closed != 0: self._conn = psycopg.connect(**self._connkwargs) return self._conn def _cursor(self): if not self._curs or self._curs.closed or self._curs.connection.closed != 0: self._curs = self._connection().cursor() return self._curs def query(self, query, fail_ok=False): try: cursor = self._cursor() cursor.execute(query) return cursor except psycopg.Error: if not fail_ok: raise def check_role_has_changed_to(self, new_role, timeout=10): bound_time = time.time() + timeout recovery_status = new_role != 'primary' while time.time() < bound_time: cur = self.query("SELECT pg_is_in_recovery()", fail_ok=True) if cur: row = cur.fetchone() if row and row[0] == recovery_status: return True time.sleep(1) return False def get_watchdog(self): return self.watchdog def _get_pid(self): try: pidfile = os.path.join(self._data_dir, 'postmaster.pid') if not os.path.exists(pidfile): return None return int(open(pidfile).readline().strip()) except Exception: return None def patroni_hang(self, timeout): hang = ProcessHang(self._handle.pid, timeout) self._closables.append(hang) hang.start() def cancel_background(self): for obj in self._closables: obj.close() self._closables = [] @property def backup_source(self): def escape(value): return re.sub(r'([\'\\ ])', r'\\\1', str(value)) return ' '.join('{0}={1}'.format(k, escape(v)) for k, v in self._replication.items()) def backup(self, dest=os.path.join('data', 'basebackup')): subprocess.call(PatroniPoolController.BACKUP_SCRIPT + ['--walmethod=none', '--datadir=' + os.path.join(self._work_directory, dest), '--dbname=' + self.backup_source]) def read_patroni_log(self, level): try: with open(str(os.path.join(self._output_dir or '', self._name + ".log"))) as f: return [line for line in f.readlines() if line[24:24 + 
len(level)] == level] except IOError: return [] class ProcessHang(object): """A background thread implementing a cancelable process hang via SIGSTOP.""" def __init__(self, pid, timeout): self._cancelled = threading.Event() self._thread = threading.Thread(target=self.run) self.pid = pid self.timeout = timeout def start(self): self._thread.start() def run(self): os.kill(self.pid, signal.SIGSTOP) try: self._cancelled.wait(self.timeout) finally: os.kill(self.pid, signal.SIGCONT) def close(self): self._cancelled.set() self._thread.join() class AbstractDcsController(AbstractController): _CLUSTER_NODE = '/service/{0}' def __init__(self, context, mktemp=True): work_directory = mktemp and tempfile.mkdtemp() or None self._paused = False super(AbstractDcsController, self).__init__(context, self.name(), work_directory, context.pctl.output_dir) def _is_accessible(self): return self._is_running() def stop(self, kill=False, timeout=15): """ terminate process and wipe out the temp work directory, but only if we actually started it""" super(AbstractDcsController, self).stop(kill=kill, timeout=timeout) if self._work_directory: shutil.rmtree(self._work_directory) def path(self, key=None, scope='batman', group=None): citus_group = '/{0}'.format(group) if group is not None else '' return self._CLUSTER_NODE.format(scope) + citus_group + (key and '/' + key or '') def start_outage(self): if not self._paused and self._handle: self._handle.suspend() self._paused = True def stop_outage(self): if self._paused and self._handle: self._handle.resume() self._paused = False @abc.abstractmethod def query(self, key, scope='batman', group=None): """ query for a value of a given key """ @abc.abstractmethod def cleanup_service_tree(self): """ clean all contents stored in the tree used for the tests """ @classmethod def get_subclasses(cls): for subclass in cls.__subclasses__(): for subsubclass in subclass.get_subclasses(): yield subsubclass yield subclass @classmethod def name(cls): return 
cls.__name__[:-10].lower() class ConsulController(AbstractDcsController): def __init__(self, context): super(ConsulController, self).__init__(context) os.environ['PATRONI_CONSUL_HOST'] = 'localhost:8500' os.environ['PATRONI_CONSUL_REGISTER_SERVICE'] = 'on' self._config_file = None import consul self._client = consul.Consul() def _start(self): self._config_file = self._work_directory + '.json' with open(self._config_file, 'wb') as f: f.write(b'{"session_ttl_min":"5s","server":true,"bootstrap":true,"advertise_addr":"127.0.0.1"}') return psutil.Popen(['consul', 'agent', '-config-file', self._config_file, '-data-dir', self._work_directory], stdout=self._log, stderr=subprocess.STDOUT) def stop(self, kill=False, timeout=15): super(ConsulController, self).stop(kill=kill, timeout=timeout) if self._config_file: os.unlink(self._config_file) def _is_running(self): try: return bool(self._client.status.leader()) except Exception: return False def path(self, key=None, scope='batman', group=None): return super(ConsulController, self).path(key, scope, group)[1:] def query(self, key, scope='batman', group=None): _, value = self._client.kv.get(self.path(key, scope, group)) return value and value['Value'].decode('utf-8') def cleanup_service_tree(self): self._client.kv.delete(self.path(scope=''), recurse=True) def start(self, max_wait_limit=15): super(ConsulController, self).start(max_wait_limit) class AbstractEtcdController(AbstractDcsController): """ handles all etcd related tasks, used for the tests setup and cleanup """ def __init__(self, context, client_cls): super(AbstractEtcdController, self).__init__(context) self._client_cls = client_cls def _start(self): return psutil.Popen(["etcd", "--enable-v2=true", "--data-dir", self._work_directory], stdout=self._log, stderr=subprocess.STDOUT) def _is_running(self): from patroni.dcs.etcd import DnsCachingResolver # if etcd is running, but we didn't start it try: self._client = self._client_cls({'host': 'localhost', 'port': 2379, 
'retry_timeout': 30, 'patronictl': 1}, DnsCachingResolver()) return True except Exception: return False class EtcdController(AbstractEtcdController): def __init__(self, context): from patroni.dcs.etcd import EtcdClient super(EtcdController, self).__init__(context, EtcdClient) os.environ['PATRONI_ETCD_HOST'] = 'localhost:2379' def query(self, key, scope='batman', group=None): import etcd try: return self._client.get(self.path(key, scope, group)).value except etcd.EtcdKeyNotFound: return None def cleanup_service_tree(self): import etcd try: self._client.delete(self.path(scope=''), recursive=True) except (etcd.EtcdKeyNotFound, etcd.EtcdConnectionFailed): return except Exception as e: assert False, "exception when cleaning up etcd contents: {0}".format(e) class Etcd3Controller(AbstractEtcdController): def __init__(self, context): from patroni.dcs.etcd3 import Etcd3Client super(Etcd3Controller, self).__init__(context, Etcd3Client) os.environ['PATRONI_ETCD3_HOST'] = 'localhost:2379' def query(self, key, scope='batman', group=None): import base64 response = self._client.range(self.path(key, scope, group)) for k in response.get('kvs', []): return base64.b64decode(k['value']).decode('utf-8') if 'value' in k else None def cleanup_service_tree(self): try: self._client.deleteprefix(self.path(scope='')) except Exception as e: assert False, "exception when cleaning up etcd contents: {0}".format(e) class AbstractExternalDcsController(AbstractDcsController): def __init__(self, context, mktemp=True): super(AbstractExternalDcsController, self).__init__(context, mktemp) self._wrapper = ['sudo'] def _start(self): return self._external_pid def start_outage(self): if not self._paused: subprocess.call(self._wrapper + ['kill', '-SIGSTOP', self._external_pid]) self._paused = True def stop_outage(self): if self._paused: subprocess.call(self._wrapper + ['kill', '-SIGCONT', self._external_pid]) self._paused = False def _has_started(self): return True @abc.abstractmethod def process_name(): 
"""process name to search with pgrep""" def _is_running(self): if not self._handle: self._external_pid = subprocess.check_output(['pgrep', '-nf', self.process_name()]).decode('utf-8').strip() return False return True def stop(self): pass class KubernetesController(AbstractExternalDcsController): def __init__(self, context): super(KubernetesController, self).__init__(context) self._namespace = 'default' self._labels = {"application": "patroni"} self._label_selector = ','.join('{0}={1}'.format(k, v) for k, v in self._labels.items()) os.environ['PATRONI_KUBERNETES_LABELS'] = json.dumps(self._labels) os.environ['PATRONI_KUBERNETES_USE_ENDPOINTS'] = 'true' os.environ.setdefault('PATRONI_KUBERNETES_BYPASS_API_SERVICE', 'true') from patroni.dcs.kubernetes import k8s_client, k8s_config k8s_config.load_kube_config(context=os.environ.setdefault('PATRONI_KUBERNETES_CONTEXT', 'kind-kind')) self._client = k8s_client self._api = self._client.CoreV1Api() def process_name(self): return "localkube" def _is_running(self): if not self._handle: context = os.environ.get('PATRONI_KUBERNETES_CONTEXT') if context.startswith('kind-'): container = '{0}-control-plane'.format(context[5:]) api_process = 'kube-apiserver' elif context.startswith('k3d-'): container = '{0}-server-0'.format(context) api_process = 'k3s server' else: return super(KubernetesController, self)._is_running() try: docker = 'docker' with open(os.devnull, 'w') as null: if subprocess.call([docker, 'info'], stdout=null, stderr=null) != 0: raise Exception except Exception: docker = 'podman' with open(os.devnull, 'w') as null: if subprocess.call([docker, 'info'], stdout=null, stderr=null) != 0: raise Exception self._wrapper = [docker, 'exec', container] self._external_pid = subprocess.check_output(self._wrapper + ['pidof', api_process]).decode('utf-8').strip() return False return True def create_pod(self, name, scope, group=None): self.delete_pod(name) labels = self._labels.copy() labels['cluster-name'] = scope if group is not 
None: labels['citus-group'] = str(group) metadata = self._client.V1ObjectMeta(namespace=self._namespace, name=name, labels=labels) spec = self._client.V1PodSpec(containers=[self._client.V1Container(name=name, image='empty')]) body = self._client.V1Pod(metadata=metadata, spec=spec) self._api.create_namespaced_pod(self._namespace, body) def delete_pod(self, name): try: self._api.delete_namespaced_pod(name, self._namespace, body=self._client.V1DeleteOptions()) except Exception: pass while True: try: self._api.read_namespaced_pod(name, self._namespace) except Exception: break def query(self, key, scope='batman', group=None): if key.startswith('members/'): pod = self._api.read_namespaced_pod(key[8:], self._namespace) return (pod.metadata.annotations or {}).get('status', '') else: try: if group is not None: scope = '{0}-{1}'.format(scope, group) rkey = 'leader' if key in ('status', 'failsafe') else key ep = scope + {'leader': '', 'history': '-config', 'initialize': '-config'}.get(rkey, '-' + rkey) e = self._api.read_namespaced_endpoints(ep, self._namespace) if key not in ('sync', 'status', 'failsafe'): return e.metadata.annotations[key] else: return json.dumps(e.metadata.annotations) except Exception: return None def cleanup_service_tree(self): try: self._api.delete_collection_namespaced_pod(self._namespace, label_selector=self._label_selector) except Exception: pass try: self._api.delete_collection_namespaced_endpoints(self._namespace, label_selector=self._label_selector) except Exception: pass while True: result = self._api.list_namespaced_pod(self._namespace, label_selector=self._label_selector) if len(result.items) < 1: break class ZooKeeperController(AbstractExternalDcsController): """ handles all zookeeper related tasks, used for the tests setup and cleanup """ def __init__(self, context, export_env=True): super(ZooKeeperController, self).__init__(context, False) if export_env: os.environ['PATRONI_ZOOKEEPER_HOSTS'] = "'localhost:2181'" import kazoo.client 
self._client = kazoo.client.KazooClient() def process_name(self): return "java .*zookeeper" def query(self, key, scope='batman', group=None): import kazoo.exceptions try: return self._client.get(self.path(key, scope, group))[0].decode('utf-8') except kazoo.exceptions.NoNodeError: return None def cleanup_service_tree(self): import kazoo.exceptions try: self._client.delete(self.path(scope=''), recursive=True) except (kazoo.exceptions.NoNodeError): return except Exception as e: assert False, "exception when cleaning up zookeeper contents: {0}".format(e) def _is_running(self): if not super(ZooKeeperController, self)._is_running(): return False # if zookeeper is running, but we didn't start it if self._client.connected: return True try: return self._client.start(1) or True except Exception: return False class MockExhibitor(BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.end_headers() self.wfile.write(b'{"servers":["127.0.0.1"],"port":2181}') def log_message(self, fmt, *args): pass class ExhibitorController(ZooKeeperController): def __init__(self, context): super(ExhibitorController, self).__init__(context, False) port = 8181 exhibitor = HTTPServer(('', port), MockExhibitor) exhibitor.daemon_thread = True exhibitor_thread = threading.Thread(target=exhibitor.serve_forever) exhibitor_thread.daemon = True exhibitor_thread.start() os.environ.update({'PATRONI_EXHIBITOR_HOSTS': 'localhost', 'PATRONI_EXHIBITOR_PORT': str(port)}) class RaftController(AbstractDcsController): CONTROLLER_ADDR = 'localhost:1234' PASSWORD = '12345' def __init__(self, context): super(RaftController, self).__init__(context) os.environ.update(PATRONI_RAFT_PARTNER_ADDRS="'" + self.CONTROLLER_ADDR + "'", PATRONI_RAFT_PASSWORD=self.PASSWORD, RAFT_PORT='1234') self._raft = None def _start(self): env = os.environ.copy() del env['PATRONI_RAFT_PARTNER_ADDRS'] env['PATRONI_RAFT_SELF_ADDR'] = self.CONTROLLER_ADDR env['PATRONI_RAFT_DATA_DIR'] = self._work_directory return 
psutil.Popen([sys.executable, '-m', 'coverage', 'run', '--source=patroni', '-p', 'patroni_raft_controller.py'], stdout=self._log, stderr=subprocess.STDOUT, env=env) def query(self, key, scope='batman', group=None): ret = self._raft.get(self.path(key, scope, group)) return ret and ret['value'] def set(self, key, value): self._raft.set(self.path(key), value) def cleanup_service_tree(self): from patroni.dcs.raft import KVStoreTTL if self._raft: self._raft.destroy() self.stop() os.makedirs(self._work_directory) self.start() ready_event = threading.Event() self._raft = KVStoreTTL(ready_event.set, None, None, partner_addrs=[self.CONTROLLER_ADDR], password=self.PASSWORD) self._raft.startAutoTick() ready_event.wait() class PatroniPoolController(object): PYTHON = sys.executable.replace('\\', '/') BACKUP_SCRIPT = [PYTHON, 'features/backup_create.py'] BACKUP_RESTORE_SCRIPT = ' '.join((PYTHON, os.path.abspath('features/backup_restore.py'))).replace('\\', '/') ARCHIVE_RESTORE_SCRIPT = ' '.join((PYTHON, os.path.abspath('features/archive-restore.py'))) def __init__(self, context): self._context = context self._dcs = None self._output_dir = None self._patroni_path = None self._processes = {} self.create_and_set_output_directory('') self._check_postgres_ssl() self.known_dcs = {subclass.name(): subclass for subclass in AbstractDcsController.get_subclasses()} def _check_postgres_ssl(self): try: subprocess.check_output(['postgres', '-D', os.devnull, '-c', 'ssl=on'], stderr=subprocess.STDOUT) raise Exception # this one should never happen because the previous line will always raise and exception except Exception as e: self._context.postgres_supports_ssl = isinstance(e, subprocess.CalledProcessError)\ and 'SSL is not supported by this build' not in e.output.decode() @property def patroni_path(self): if self._patroni_path is None: cwd = os.path.realpath(__file__) while True: cwd, entry = os.path.split(cwd) if entry == 'features' or cwd == '/': break self._patroni_path = cwd return 
self._patroni_path @property def output_dir(self): return self._output_dir def start(self, name, max_wait_limit=40, custom_config=None): if name not in self._processes: self._processes[name] = PatroniController(self._context, name, self.patroni_path, self._output_dir, custom_config) self._processes[name].start(max_wait_limit) def __getattr__(self, func): if func not in ['stop', 'query', 'write_label', 'read_label', 'check_role_has_changed_to', 'add_tag_to_config', 'get_watchdog', 'patroni_hang', 'backup', 'read_patroni_log']: raise AttributeError("PatroniPoolController instance has no attribute '{0}'".format(func)) def wrapper(name, *args, **kwargs): return getattr(self._processes[name], func)(*args, **kwargs) return wrapper def stop_all(self): for ctl in self._processes.values(): ctl.cancel_background() ctl.stop() self._processes.clear() def create_and_set_output_directory(self, feature_name): feature_dir = os.path.join(self.patroni_path, 'features', 'output', feature_name.replace(' ', '_')) if os.path.exists(feature_dir): shutil.rmtree(feature_dir) os.makedirs(feature_dir) self._output_dir = feature_dir def clone(self, from_name, cluster_name, to_name): f = self._processes[from_name] custom_config = { 'scope': cluster_name, 'bootstrap': { 'method': 'pg_basebackup', 'pg_basebackup': { 'command': " ".join(self.BACKUP_SCRIPT + ['--walmethod=stream', '--dbname="{0}"'.format(f.backup_source)]) }, 'dcs': { 'postgresql': { 'parameters': { 'max_connections': 101 } } } }, 'postgresql': { 'parameters': { 'archive_mode': 'on', 'archive_command': (self.ARCHIVE_RESTORE_SCRIPT + ' --mode archive ' + '--dirname {} --filename %f --pathname %p') .format(os.path.join(self.patroni_path, 'data', 'wal_archive_clone').replace('\\', '/')) }, 'authentication': { 'superuser': {'password': 'zalando1'}, 'replication': {'password': 'rep-pass1'} } } } self.start(to_name, custom_config=custom_config) def backup_restore_config(self, params=None): return { 'command': (self.BACKUP_RESTORE_SCRIPT 
+ ' --sourcedir=' + os.path.join(self.patroni_path, 'data', 'basebackup')).replace('\\', '/'), 'test-argument': 'test-value', # test config mapping approach on custom bootstrap/replica creation **(params or {}), } def bootstrap_from_backup(self, name, cluster_name): custom_config = { 'scope': cluster_name, 'bootstrap': { 'method': 'backup_restore', 'backup_restore': self.backup_restore_config({ 'recovery_conf': { 'recovery_target_action': 'promote', 'recovery_target_timeline': 'latest', 'restore_command': (self.ARCHIVE_RESTORE_SCRIPT + ' --mode restore ' + '--dirname {} --filename %f --pathname %p').format( os.path.join(self.patroni_path, 'data', 'wal_archive_clone').replace('\\', '/')) }, }) }, 'postgresql': { 'authentication': { 'superuser': {'password': 'zalando2'}, 'replication': {'password': 'rep-pass2'} } } } self.start(name, custom_config=custom_config) def bootstrap_from_backup_no_leader(self, name, cluster_name): custom_config = { 'scope': cluster_name, 'postgresql': { 'recovery_conf': { 'restore_command': (self.ARCHIVE_RESTORE_SCRIPT + ' --mode restore ' + '--dirname {} --filename %f --pathname %p') .format(os.path.join(self.patroni_path, 'data', 'wal_archive').replace('\\', '/')) }, 'create_replica_methods': ['no_leader_bootstrap'], 'no_leader_bootstrap': self.backup_restore_config({'no_leader': '1'}) } } self.start(name, custom_config=custom_config) @property def dcs(self): if self._dcs is None: self._dcs = os.environ.pop('DCS', 'etcd') assert self._dcs in self.known_dcs, 'Unsupported dcs: ' + self._dcs return self._dcs class WatchdogMonitor(object): """Testing harness for emulating a watchdog device as a named pipe. Because we can't easily emulate ioctl's we require a custom driver on Patroni side. The device takes no action, only notes if it was pinged and/or triggered. 
""" def __init__(self, name, work_directory, output_dir): self.fifo_path = os.path.join(work_directory, 'data', 'watchdog.{0}.fifo'.format(name)) self.fifo_file = None self._stop_requested = False # Relying on bool setting being atomic self._thread = None self.last_ping = None self.was_pinged = False self.was_closed = False self._was_triggered = False self.timeout = 60 self._log_file = open(os.path.join(output_dir, 'watchdog.{0}.log'.format(name)), 'w') self._log("watchdog {0} initialized".format(name)) def _log(self, msg): tstamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f") self._log_file.write("{0}: {1}\n".format(tstamp, msg)) def start(self): assert self._thread is None self._stop_requested = False self._log("starting fifo {0}".format(self.fifo_path)) fifo_dir = os.path.dirname(self.fifo_path) if os.path.exists(self.fifo_path): os.unlink(self.fifo_path) elif not os.path.exists(fifo_dir): os.mkdir(fifo_dir) os.mkfifo(self.fifo_path) self.last_ping = time.time() self._thread = threading.Thread(target=self.run) self._thread.start() def run(self): try: while not self._stop_requested: self._log("opening") self.fifo_file = os.open(self.fifo_path, os.O_RDONLY) try: self._log("Fifo {0} connected".format(self.fifo_path)) self.was_closed = False while not self._stop_requested: c = os.read(self.fifo_file, 1) if c == b'X': self._log("Stop requested") return elif c == b'': self._log("Pipe closed") break elif c == b'C': command = b'' c = os.read(self.fifo_file, 1) while c != b'\n' and c != b'': command += c c = os.read(self.fifo_file, 1) command = command.decode('utf8') if command.startswith('timeout='): self.timeout = int(command.split('=')[1]) self._log("timeout={0}".format(self.timeout)) elif c in [b'V', b'1']: cur_time = time.time() if cur_time - self.last_ping > self.timeout: self._log("Triggered") self._was_triggered = True if c == b'V': self._log("magic close") self.was_closed = True elif c == b'1': self.was_pinged = True self._log("ping after {0} 
seconds".format(cur_time - (self.last_ping or cur_time))) self.last_ping = cur_time else: self._log('Unknown command {0} received from fifo'.format(c)) finally: self.was_closed = True self._log("closing") os.close(self.fifo_file) except Exception as e: self._log("Error {0}".format(e)) finally: self._log("stopping") self._log_file.flush() if os.path.exists(self.fifo_path): os.unlink(self.fifo_path) def stop(self): self._log("Monitor stop") self._stop_requested = True try: if os.path.exists(self.fifo_path): fd = os.open(self.fifo_path, os.O_WRONLY) os.write(fd, b'X') os.close(fd) except Exception as e: self._log("err while closing: {0}".format(str(e))) if self._thread: self._thread.join() self._thread = None def reset(self): self._log("reset") self.was_pinged = self.was_closed = self._was_triggered = False @property def was_triggered(self): delta = time.time() - self.last_ping triggered = self._was_triggered or not self.was_closed and delta > self.timeout self._log("triggered={0}, {1}s left".format(triggered, self.timeout - delta)) return triggered # actions to execute on start/stop of the tests and before running individual features def before_all(context): context.ci = os.name == 'nt' or\ any(a in os.environ for a in ('TRAVIS_BUILD_NUMBER', 'BUILD_NUMBER', 'GITHUB_ACTIONS')) context.timeout_multiplier = 5 if context.ci else 1 # MacOS sometimes is VERY slow context.pctl = PatroniPoolController(context) context.keyfile = os.path.join(context.pctl.output_dir, 'patroni.key') context.certfile = os.path.join(context.pctl.output_dir, 'patroni.crt') try: if sys.platform == 'darwin' and 'GITHUB_ACTIONS' in os.environ: raise Exception with open(os.devnull, 'w') as null: ret = subprocess.call(['openssl', 'req', '-nodes', '-new', '-x509', '-subj', '/CN=batman.patroni', '-addext', 'subjectAltName=IP:127.0.0.1', '-keyout', context.keyfile, '-out', context.certfile], stdout=null, stderr=null) if ret != 0: raise Exception os.chmod(context.keyfile, stat.S_IWRITE | stat.S_IREAD) 
except Exception: context.keyfile = context.certfile = None os.environ.update({'PATRONI_RESTAPI_USERNAME': 'username', 'PATRONI_RESTAPI_PASSWORD': 'password'}) ctl = {'auth': os.environ['PATRONI_RESTAPI_USERNAME'] + ':' + os.environ['PATRONI_RESTAPI_PASSWORD']} if context.certfile: os.environ.update({'PATRONI_RESTAPI_CAFILE': context.certfile, 'PATRONI_RESTAPI_CERTFILE': context.certfile, 'PATRONI_RESTAPI_KEYFILE': context.keyfile, 'PATRONI_RESTAPI_VERIFY_CLIENT': 'required', 'PATRONI_CTL_INSECURE': 'on', 'PATRONI_CTL_CERTFILE': context.certfile, 'PATRONI_CTL_KEYFILE': context.keyfile}) ctl.update({'cacert': context.certfile, 'certfile': context.certfile, 'keyfile': context.keyfile}) context.request_executor = PatroniRequest({'ctl': ctl}, True) context.dcs_ctl = context.pctl.known_dcs[context.pctl.dcs](context) context.dcs_ctl.start() try: context.dcs_ctl.cleanup_service_tree() except AssertionError: # after_all handlers won't be executed in before_all context.dcs_ctl.stop() raise def after_all(context): context.dcs_ctl.stop() subprocess.call([sys.executable, '-m', 'coverage', 'combine']) subprocess.call([sys.executable, '-m', 'coverage', 'report']) def before_feature(context, feature): """ create per-feature output directory to collect Patroni and PostgreSQL logs """ if feature.name == 'watchdog' and os.name == 'nt': return feature.skip("Watchdog isn't supported on Windows") elif feature.name == 'citus': lib = subprocess.check_output(['pg_config', '--pkglibdir']).decode('utf-8').strip() if not os.path.exists(os.path.join(lib, 'citus.so')): return feature.skip("Citus extenstion isn't available") context.pctl.create_and_set_output_directory(feature.name) def after_feature(context, feature): """ send SIGCONT to a dcs if neccessary, stop all Patronis remove their data directory and cleanup the keys in etcd """ context.dcs_ctl.stop_outage() context.pctl.stop_all() data = os.path.join(context.pctl.patroni_path, 'data') if os.path.exists(data): shutil.rmtree(data) 
context.dcs_ctl.cleanup_service_tree() found = False logs = glob.glob(context.pctl.output_dir + '/patroni_*.log') for log in logs: with open(log) as f: for line in f: if 'please report it as a BUG' in line: print(':'.join([log, line.rstrip()])) found = True if feature.status == 'failed' or found: shutil.copytree(context.pctl.output_dir, context.pctl.output_dir + '_failed') if found: raise Exception('Unexpected errors in Patroni log files') def before_scenario(context, scenario): if 'slot-advance' in scenario.effective_tags: for p in context.pctl._processes.values(): if p._conn and p._conn.server_version < 110000: scenario.skip('pg_replication_slot_advance() is not supported on {0}'.format(p._conn.server_version)) break if 'dcs-failsafe' in scenario.effective_tags and not context.dcs_ctl._handle: scenario.skip('it is not possible to control state of {0} from tests'.format(context.dcs_ctl.name())) if 'reject-duplicate-name' in scenario.effective_tags and context.dcs_ctl.name() == 'raft': scenario.skip('Flaky test with Raft') patroni-3.2.2/features/ignored_slots.feature000066400000000000000000000110361455170150700212460ustar00rootroot00000000000000Feature: ignored slots Scenario: check ignored slots aren't removed on failover/switchover Given I start postgres1 Then postgres1 is a leader after 10 seconds And there is a non empty initialize key in DCS after 15 seconds When I issue a PATCH request to http://127.0.0.1:8009/config with {"ignore_slots": [{"name": "unmanaged_slot_0", "database": "postgres", "plugin": "test_decoding", "type": "logical"}, {"name": "unmanaged_slot_1", "database": "postgres", "plugin": "test_decoding"}, {"name": "unmanaged_slot_2", "database": "postgres"}, {"name": "unmanaged_slot_3"}], "postgresql": {"parameters": {"wal_level": "logical"}}} Then I receive a response code 200 And Response on GET http://127.0.0.1:8009/config contains ignore_slots after 10 seconds # Make sure the wal_level has been changed. 
When I shut down postgres1 And I start postgres1 Then postgres1 is a leader after 10 seconds And "members/postgres1" key in DCS has role=master after 10 seconds # Make sure Patroni has finished telling Postgres it should be accepting writes. And postgres1 role is the primary after 20 seconds # 1. Create our test logical replication slot. # Test that any subset of attributes in the ignore slots matcher is enough to match a slot # by using 3 different slots. When I create a logical replication slot unmanaged_slot_0 on postgres1 with the test_decoding plugin And I create a logical replication slot unmanaged_slot_1 on postgres1 with the test_decoding plugin And I create a logical replication slot unmanaged_slot_2 on postgres1 with the test_decoding plugin And I create a logical replication slot unmanaged_slot_3 on postgres1 with the test_decoding plugin And I create a logical replication slot dummy_slot on postgres1 with the test_decoding plugin # It seems like it'd be obvious that these slots exist since we just created them, # but Patroni can actually end up dropping them almost immediately, so it's helpful # to verify they exist before we begin testing whether they persist through failover # cycles. 
Then postgres1 has a logical replication slot named unmanaged_slot_0 with the test_decoding plugin after 2 seconds And postgres1 has a logical replication slot named unmanaged_slot_1 with the test_decoding plugin after 2 seconds And postgres1 has a logical replication slot named unmanaged_slot_2 with the test_decoding plugin after 2 seconds And postgres1 has a logical replication slot named unmanaged_slot_3 with the test_decoding plugin after 2 seconds When I start postgres0 Then "members/postgres0" key in DCS has role=replica after 10 seconds And postgres0 role is the secondary after 20 seconds # Verify that the replica has advanced beyond the point in the WAL # where we created the replication slot so that on the next failover # cycle we don't accidentally rewind to before the slot creation. And replication works from postgres1 to postgres0 after 20 seconds When I shut down postgres1 Then "members/postgres0" key in DCS has role=master after 10 seconds # 2. After a failover the server (now a replica) still has the slot. When I start postgres1 Then postgres1 role is the secondary after 20 seconds And "members/postgres1" key in DCS has role=replica after 10 seconds # give Patroni time to sync replication slots And I sleep for 2 seconds And postgres1 has a logical replication slot named unmanaged_slot_0 with the test_decoding plugin after 2 seconds And postgres1 has a logical replication slot named unmanaged_slot_1 with the test_decoding plugin after 2 seconds And postgres1 has a logical replication slot named unmanaged_slot_2 with the test_decoding plugin after 2 seconds And postgres1 has a logical replication slot named unmanaged_slot_3 with the test_decoding plugin after 2 seconds And postgres1 does not have a replication slot named dummy_slot # 3. After a failover the server (now a primary) still has the slot. 
When I shut down postgres0 Then "members/postgres1" key in DCS has role=master after 10 seconds And postgres1 has a logical replication slot named unmanaged_slot_0 with the test_decoding plugin after 2 seconds And postgres1 has a logical replication slot named unmanaged_slot_1 with the test_decoding plugin after 2 seconds And postgres1 has a logical replication slot named unmanaged_slot_2 with the test_decoding plugin after 2 seconds And postgres1 has a logical replication slot named unmanaged_slot_3 with the test_decoding plugin after 2 seconds patroni-3.2.2/features/patroni_api.feature000066400000000000000000000150671455170150700207100ustar00rootroot00000000000000Feature: patroni api We should check that patroni correctly responds to valid and not-valid API requests. Scenario: check API requests on a stand-alone server Given I start postgres0 And postgres0 is a leader after 10 seconds When I issue a GET request to http://127.0.0.1:8008/ Then I receive a response code 200 And I receive a response state running And I receive a response role master When I issue a GET request to http://127.0.0.1:8008/standby_leader Then I receive a response code 503 When I issue a GET request to http://127.0.0.1:8008/health Then I receive a response code 200 When I issue a GET request to http://127.0.0.1:8008/replica Then I receive a response code 503 When I issue a POST request to http://127.0.0.1:8008/reinitialize with {"force": true} Then I receive a response code 503 And I receive a response text I am the leader, can not reinitialize When I run patronictl.py switchover batman --master postgres0 --force Then I receive a response returncode 1 And I receive a response output "Error: No candidates found to switchover to" When I issue a POST request to http://127.0.0.1:8008/switchover with {"leader": "postgres0"} Then I receive a response code 412 And I receive a response text switchover is not possible: cluster does not have members except leader When I issue an empty POST request to 
http://127.0.0.1:8008/failover Then I receive a response code 400 When I issue a POST request to http://127.0.0.1:8008/failover with {"foo": "bar"} Then I receive a response code 400 And I receive a response text "Failover could be performed only to a specific candidate" Scenario: check local configuration reload Given I add tag new_tag new_value to postgres0 config And I issue an empty POST request to http://127.0.0.1:8008/reload Then I receive a response code 202 Scenario: check dynamic configuration change via DCS Given I issue a PATCH request to http://127.0.0.1:8008/config with {"ttl": 20, "postgresql": {"parameters": {"max_connections": "101"}}} Then I receive a response code 200 And Response on GET http://127.0.0.1:8008/patroni contains pending_restart after 11 seconds When I issue a GET request to http://127.0.0.1:8008/config Then I receive a response code 200 And I receive a response ttl 20 When I issue a GET request to http://127.0.0.1:8008/patroni Then I receive a response code 200 And I receive a response tags {'new_tag': 'new_value'} And I sleep for 4 seconds Scenario: check the scheduled restart Given I run patronictl.py edit-config -p 'superuser_reserved_connections=6' --force batman Then I receive a response returncode 0 And I receive a response output "+ superuser_reserved_connections: 6" And Response on GET http://127.0.0.1:8008/patroni contains pending_restart after 5 seconds Given I issue a scheduled restart at http://127.0.0.1:8008 in 5 seconds with {"role": "replica"} Then I receive a response code 202 And I sleep for 8 seconds And Response on GET http://127.0.0.1:8008/patroni contains pending_restart after 10 seconds Given I issue a scheduled restart at http://127.0.0.1:8008 in 5 seconds with {"restart_pending": "True"} Then I receive a response code 202 And Response on GET http://127.0.0.1:8008/patroni does not contain pending_restart after 10 seconds And postgres0 role is the primary after 10 seconds Scenario: check API requests for the 
primary-replica pair in the pause mode Given I start postgres1 Then replication works from postgres0 to postgres1 after 20 seconds When I run patronictl.py pause batman Then I receive a response returncode 0 When I kill postmaster on postgres1 And I issue a GET request to http://127.0.0.1:8009/replica Then I receive a response code 503 And "members/postgres1" key in DCS has state=stopped after 10 seconds When I run patronictl.py restart batman postgres1 --force Then I receive a response returncode 0 Then replication works from postgres0 to postgres1 after 20 seconds And I sleep for 2 seconds When I issue a GET request to http://127.0.0.1:8009/replica Then I receive a response code 200 And I receive a response state running And I receive a response role replica When I run patronictl.py reinit batman postgres1 --force --wait Then I receive a response returncode 0 And I receive a response output "Success: reinitialize for member postgres1" And postgres1 role is the secondary after 30 seconds And replication works from postgres0 to postgres1 after 20 seconds When I run patronictl.py restart batman postgres0 --force Then I receive a response returncode 0 And I receive a response output "Success: restart on member postgres0" And postgres0 role is the primary after 5 seconds Scenario: check the switchover via the API in the pause mode Given I issue a POST request to http://127.0.0.1:8008/switchover with {"leader": "postgres0", "candidate": "postgres1"} Then I receive a response code 200 And postgres1 is a leader after 5 seconds And postgres1 role is the primary after 10 seconds And postgres0 role is the secondary after 10 seconds And replication works from postgres1 to postgres0 after 20 seconds And "members/postgres0" key in DCS has state=running after 10 seconds When I issue a GET request to http://127.0.0.1:8008/primary Then I receive a response code 503 When I issue a GET request to http://127.0.0.1:8008/replica Then I receive a response code 200 When I issue a GET 
request to http://127.0.0.1:8009/primary Then I receive a response code 200 When I issue a GET request to http://127.0.0.1:8009/replica Then I receive a response code 503 Scenario: check the scheduled switchover Given I issue a scheduled switchover from postgres1 to postgres0 in 10 seconds Then I receive a response returncode 1 And I receive a response output "Can't schedule switchover in the paused state" When I run patronictl.py resume batman Then I receive a response returncode 0 Given I issue a scheduled switchover from postgres1 to postgres0 in 10 seconds Then I receive a response returncode 0 And postgres0 is a leader after 20 seconds And postgres0 role is the primary after 10 seconds And postgres1 role is the secondary after 10 seconds And replication works from postgres0 to postgres1 after 25 seconds And "members/postgres1" key in DCS has state=running after 10 seconds When I issue a GET request to http://127.0.0.1:8008/primary Then I receive a response code 200 When I issue a GET request to http://127.0.0.1:8008/replica Then I receive a response code 503 When I issue a GET request to http://127.0.0.1:8009/primary Then I receive a response code 503 When I issue a GET request to http://127.0.0.1:8009/replica Then I receive a response code 200 patroni-3.2.2/features/permanent_slots.feature000066400000000000000000000114701455170150700216120ustar00rootroot00000000000000Feature: permanent slots Scenario: check that physical permanent slots are created Given I start postgres0 Then postgres0 is a leader after 10 seconds And there is a non empty initialize key in DCS after 15 seconds When I issue a PATCH request to http://127.0.0.1:8008/config with {"slots":{"test_physical":0,"postgres0":0,"postgres1":0,"postgres3":0},"postgresql":{"parameters":{"wal_level":"logical"}}} Then I receive a response code 200 And Response on GET http://127.0.0.1:8008/config contains slots after 10 seconds When I start postgres1 And I start postgres2 And I configure and start postgres3 
with a tag replicatefrom postgres2 Then postgres0 has a physical replication slot named test_physical after 10 seconds And postgres0 has a physical replication slot named postgres1 after 10 seconds And postgres0 has a physical replication slot named postgres2 after 10 seconds And postgres2 has a physical replication slot named postgres3 after 10 seconds @slot-advance Scenario: check that logical permanent slots are created Given I run patronictl.py restart batman postgres0 --force And I issue a PATCH request to http://127.0.0.1:8008/config with {"slots":{"test_logical":{"type":"logical","database":"postgres","plugin":"test_decoding"}}} Then postgres0 has a logical replication slot named test_logical with the test_decoding plugin after 10 seconds @slot-advance Scenario: check that permanent slots are created on replicas Given postgres1 has a logical replication slot named test_logical with the test_decoding plugin after 10 seconds Then Logical slot test_logical is in sync between postgres0 and postgres1 after 10 seconds And Logical slot test_logical is in sync between postgres0 and postgres2 after 10 seconds And Logical slot test_logical is in sync between postgres0 and postgres3 after 10 seconds And postgres1 has a physical replication slot named test_physical after 2 seconds And postgres2 has a physical replication slot named test_physical after 2 seconds And postgres3 has a physical replication slot named test_physical after 2 seconds @slot-advance Scenario: check permanent physical slots that match with member names Given postgres0 has a physical replication slot named postgres3 after 2 seconds And postgres1 has a physical replication slot named postgres0 after 2 seconds And postgres1 has a physical replication slot named postgres3 after 2 seconds And postgres2 has a physical replication slot named postgres0 after 2 seconds And postgres2 has a physical replication slot named postgres3 after 2 seconds And postgres2 has a physical replication slot named postgres1 
after 2 seconds And postgres1 does not have a replication slot named postgres2 And postgres3 does not have a replication slot named postgres2 @slot-advance Scenario: check that permanent slots are advanced on replicas Given I add the table replicate_me to postgres0 When I get all changes from logical slot test_logical on postgres0 And I get all changes from physical slot test_physical on postgres0 Then Logical slot test_logical is in sync between postgres0 and postgres1 after 10 seconds And Physical slot test_physical is in sync between postgres0 and postgres1 after 10 seconds And Logical slot test_logical is in sync between postgres0 and postgres2 after 10 seconds And Physical slot test_physical is in sync between postgres0 and postgres2 after 10 seconds And Logical slot test_logical is in sync between postgres0 and postgres3 after 10 seconds And Physical slot test_physical is in sync between postgres0 and postgres3 after 10 seconds And Physical slot postgres1 is in sync between postgres0 and postgres2 after 10 seconds And Physical slot postgres3 is in sync between postgres2 and postgres0 after 20 seconds And Physical slot postgres3 is in sync between postgres2 and postgres1 after 10 seconds And postgres1 does not have a replication slot named postgres2 And postgres3 does not have a replication slot named postgres2 @slot-advance Scenario: check that only permanent slots are written to the /status key Given "status" key in DCS has test_physical in slots And "status" key in DCS has postgres0 in slots And "status" key in DCS has postgres1 in slots And "status" key in DCS does not have postgres2 in slots And "status" key in DCS has postgres3 in slots Scenario: check permanent physical replication slot after failover Given I shut down postgres3 And I shut down postgres2 And I shut down postgres0 Then postgres1 has a physical replication slot named test_physical after 10 seconds And postgres1 has a physical replication slot named postgres0 after 10 seconds And postgres1 
has a physical replication slot named postgres3 after 10 seconds patroni-3.2.2/features/priority_failover.feature000066400000000000000000000052731455170150700221510ustar00rootroot00000000000000Feature: priority replication We should check that we can give nodes priority during failover Scenario: check failover priority 0 prevents leaderships Given I configure and start postgres0 with a tag failover_priority 1 And I configure and start postgres1 with a tag failover_priority 0 Then replication works from postgres0 to postgres1 after 20 seconds When I shut down postgres0 And there is one of ["following a different leader because I am not allowed to promote"] INFO in the postgres1 patroni log after 5 seconds Then postgres1 role is the secondary after 10 seconds When I start postgres0 Then postgres0 role is the primary after 10 seconds Scenario: check higher failover priority is respected Given I configure and start postgres2 with a tag failover_priority 1 And I configure and start postgres3 with a tag failover_priority 2 Then replication works from postgres0 to postgres2 after 20 seconds And replication works from postgres0 to postgres3 after 20 seconds When I shut down postgres0 Then postgres3 role is the primary after 10 seconds And there is one of ["postgres3 has equally tolerable WAL position and priority 2, while this node has priority 1","Wal position of postgres3 is ahead of my wal position"] INFO in the postgres2 patroni log after 5 seconds Scenario: check conflicting configuration handling When I set nofailover tag in postgres2 config And I issue an empty POST request to http://127.0.0.1:8010/reload Then I receive a response code 202 And there is one of ["Conflicting configuration between nofailover: True and failover_priority: 1. 
Defaulting to nofailover: True"] WARNING in the postgres2 patroni log after 5 seconds And "members/postgres2" key in DCS has tags={'failover_priority': '1', 'nofailover': True} after 10 seconds When I issue a POST request to http://127.0.0.1:8010/failover with {"candidate": "postgres2"} Then I receive a response code 412 And I receive a response text "failover is not possible: no good candidates have been found" When I reset nofailover tag in postgres1 config And I issue an empty POST request to http://127.0.0.1:8009/reload Then I receive a response code 202 And there is one of ["Conflicting configuration between nofailover: False and failover_priority: 0. Defaulting to nofailover: False"] WARNING in the postgres1 patroni log after 5 seconds And "members/postgres1" key in DCS has tags={'failover_priority': '0', 'nofailover': False} after 10 seconds And I issue a POST request to http://127.0.0.1:8009/failover with {"candidate": "postgres1"} Then I receive a response code 200 And postgres1 role is the primary after 10 seconds patroni-3.2.2/features/recovery.feature000066400000000000000000000024241455170150700202320ustar00rootroot00000000000000Feature: recovery We want to check that crashed postgres is started back Scenario: check that timeline is not incremented when primary is started after crash Given I start postgres0 Then postgres0 is a leader after 10 seconds And there is a non empty initialize key in DCS after 15 seconds When I start postgres1 And I add the table foo to postgres0 Then table foo is present on postgres1 after 20 seconds When I kill postmaster on postgres0 Then postgres0 role is the primary after 10 seconds When I issue a GET request to http://127.0.0.1:8008/ Then I receive a response code 200 And I receive a response role master And I receive a response timeline 1 And "members/postgres0" key in DCS has state=running after 12 seconds And replication works from postgres0 to postgres1 after 15 seconds Scenario: check immediate failover when 
master_start_timeout=0 Given I issue a PATCH request to http://127.0.0.1:8008/config with {"master_start_timeout": 0} Then I receive a response code 200 And Response on GET http://127.0.0.1:8008/config contains master_start_timeout after 10 seconds When I kill postmaster on postgres0 Then postgres1 is a leader after 10 seconds And postgres1 role is the primary after 10 seconds patroni-3.2.2/features/standby_cluster.feature000066400000000000000000000076371455170150700216140ustar00rootroot00000000000000Feature: standby cluster Scenario: prepare the cluster with logical slots Given I start postgres1 Then postgres1 is a leader after 10 seconds And there is a non empty initialize key in DCS after 15 seconds When I issue a PATCH request to http://127.0.0.1:8009/config with {"slots": {"pm_1": {"type": "physical"}}, "postgresql": {"parameters": {"wal_level": "logical"}}} Then I receive a response code 200 And Response on GET http://127.0.0.1:8009/config contains slots after 10 seconds And I sleep for 3 seconds When I issue a PATCH request to http://127.0.0.1:8009/config with {"slots": {"test_logical": {"type": "logical", "database": "postgres", "plugin": "test_decoding"}}} Then I receive a response code 200 And I do a backup of postgres1 When I start postgres0 Then "members/postgres0" key in DCS has state=running after 10 seconds And replication works from postgres1 to postgres0 after 15 seconds When I issue a GET request to http://127.0.0.1:8008/patroni Then I receive a response code 200 And I receive a response replication_state streaming And "members/postgres0" key in DCS has replication_state=streaming after 10 seconds @slot-advance Scenario: check permanent logical slots are synced to the replica Given I run patronictl.py restart batman postgres1 --force Then Logical slot test_logical is in sync between postgres0 and postgres1 after 10 seconds Scenario: Detach exiting node from the cluster When I shut down postgres1 Then postgres0 is a leader after 10 seconds And 
"members/postgres0" key in DCS has role=master after 3 seconds When I issue a GET request to http://127.0.0.1:8008/ Then I receive a response code 200 Scenario: check replication of a single table in a standby cluster Given I start postgres1 in a standby cluster batman1 as a clone of postgres0 Then postgres1 is a leader of batman1 after 10 seconds When I add the table foo to postgres0 Then table foo is present on postgres1 after 20 seconds When I issue a GET request to http://127.0.0.1:8009/patroni Then I receive a response code 200 And I receive a response replication_state streaming And I sleep for 3 seconds When I issue a GET request to http://127.0.0.1:8009/primary Then I receive a response code 503 When I issue a GET request to http://127.0.0.1:8009/standby_leader Then I receive a response code 200 And I receive a response role standby_leader And there is a postgres1_cb.log with "on_role_change standby_leader batman1" in postgres1 data directory When I start postgres2 in a cluster batman1 Then postgres2 role is the replica after 24 seconds And table foo is present on postgres2 after 20 seconds When I issue a GET request to http://127.0.0.1:8010/patroni Then I receive a response code 200 And I receive a response replication_state streaming And postgres1 does not have a replication slot named test_logical Scenario: check switchover Given I run patronictl.py switchover batman1 --force Then Status code on GET http://127.0.0.1:8010/standby_leader is 200 after 10 seconds And postgres1 is replicating from postgres2 after 32 seconds And there is a postgres2_cb.log with "on_start replica batman1\non_role_change standby_leader batman1" in postgres2 data directory Scenario: check failover When I kill postgres2 And I kill postmaster on postgres2 Then postgres1 is replicating from postgres0 after 32 seconds And Status code on GET http://127.0.0.1:8009/standby_leader is 200 after 10 seconds When I issue a GET request to http://127.0.0.1:8009/primary Then I receive a 
response code 503 And I receive a response role standby_leader And replication works from postgres0 to postgres1 after 15 seconds And there is a postgres1_cb.log with "on_role_change replica batman1\non_role_change standby_leader batman1" in postgres1 data directory patroni-3.2.2/features/steps/000077500000000000000000000000001455170150700161535ustar00rootroot00000000000000patroni-3.2.2/features/steps/basic_replication.py000066400000000000000000000115261455170150700222040ustar00rootroot00000000000000import json import patroni.psycopg as pg from behave import step, then from time import sleep, time @step('I start {name:w}') def start_patroni(context, name): return context.pctl.start(name) @step('I start duplicate {name:w} on port {port:d}') def start_duplicate_patroni(context, name, port): config = { "name": name, "restapi": { "listen": "127.0.0.1:{0}".format(port) } } try: context.pctl.start('dup-' + name, custom_config=config) assert False, "Process was expected to fail" except AssertionError as e: assert 'is not running after being started' in str(e), \ "No error was raised by duplicate start of {0} ".format(name) @step('I shut down {name:w}') def stop_patroni(context, name): return context.pctl.stop(name, timeout=60) @step('I kill {name:w}') def kill_patroni(context, name): return context.pctl.stop(name, kill=True) @step('I shut down postmaster on {name:w}') def stop_postgres(context, name): return context.pctl.stop(name, postgres=True) @step('I kill postmaster on {name:w}') def kill_postgres(context, name): return context.pctl.stop(name, kill=True, postgres=True) @step('I add the table {table_name:w} to {pg_name:w}') def add_table(context, table_name, pg_name): # parse the configuration file and get the port try: context.pctl.query(pg_name, "CREATE TABLE public.{0}()".format(table_name)) except pg.Error as e: assert False, "Error creating table {0} on {1}: {2}".format(table_name, pg_name, e) @step('I {action:w} wal replay on {pg_name:w}') def 
toggle_wal_replay(context, action, pg_name): # pause or resume the wal replay process try: version = context.pctl.query(pg_name, "SHOW server_version_num").fetchone()[0] wal_name = 'xlog' if int(version) / 10000 < 10 else 'wal' context.pctl.query(pg_name, "SELECT pg_{0}_replay_{1}()".format(wal_name, action)) except pg.Error as e: assert False, "Error during {0} wal recovery on {1}: {2}".format(action, pg_name, e) @step('I {action:w} table on {pg_name:w}') def crdr_mytest(context, action, pg_name): try: if (action == "create"): context.pctl.query(pg_name, "create table if not exists public.mytest(id numeric)") else: context.pctl.query(pg_name, "drop table if exists public.mytest") except pg.Error as e: assert False, "Error {0} table mytest on {1}: {2}".format(action, pg_name, e) @step('I load data on {pg_name:w}') def initiate_load(context, pg_name): # perform dummy load try: context.pctl.query(pg_name, "insert into public.mytest select r::numeric from generate_series(1, 350000) r") except pg.Error as e: assert False, "Error loading test data on {0}: {1}".format(pg_name, e) @then('Table {table_name:w} is present on {pg_name:w} after {max_replication_delay:d} seconds') def table_is_present_on(context, table_name, pg_name, max_replication_delay): max_replication_delay *= context.timeout_multiplier for _ in range(int(max_replication_delay)): if context.pctl.query(pg_name, "SELECT 1 FROM public.{0}".format(table_name), fail_ok=True) is not None: break sleep(1) else: assert False, \ "Table {0} is not present on {1} after {2} seconds".format(table_name, pg_name, max_replication_delay) @then('{pg_name:w} role is the {pg_role:w} after {max_promotion_timeout:d} seconds') def check_role(context, pg_name, pg_role, max_promotion_timeout): max_promotion_timeout *= context.timeout_multiplier assert context.pctl.check_role_has_changed_to(pg_name, pg_role, timeout=int(max_promotion_timeout)), \ "{0} role didn't change to {1} after {2} seconds".format(pg_name, pg_role, 
max_promotion_timeout) @step('replication works from {primary:w} to {replica:w} after {time_limit:d} seconds') @then('replication works from {primary:w} to {replica:w} after {time_limit:d} seconds') def replication_works(context, primary, replica, time_limit): context.execute_steps(u""" When I add the table test_{0} to {1} Then table test_{0} is present on {2} after {3} seconds """.format(str(time()).replace('.', '_').replace(',', '_'), primary, replica, time_limit)) @step('there is one of {message_list} {level:w} in the {node} patroni log after {timeout:d} seconds') def check_patroni_log(context, message_list, level, node, timeout): timeout *= context.timeout_multiplier message_list = json.loads(message_list) for _ in range(int(timeout)): messsages_of_level = context.pctl.read_patroni_log(node, level) if any(any(message in line for line in messsages_of_level) for message in message_list): break sleep(1) else: assert False, f"There were none of {message_list} {level} in the {node} patroni log after {timeout} seconds" patroni-3.2.2/features/steps/cascading_replication.py000066400000000000000000000036721455170150700230420ustar00rootroot00000000000000import json import time from behave import step, then @step('I configure and start {name:w} with a tag {tag_name:w} {tag_value:w}') def start_patroni_with_a_name_value_tag(context, name, tag_name, tag_value): return context.pctl.start(name, custom_config={'tags': {tag_name: tag_value}}) @then('There is a {label} with "{content}" in {name:w} data directory') def check_label(context, label, content, name): value = (context.pctl.read_label(name, label) or '').replace('\n', '\\n') assert content in value, "\"{0}\" in {1} doesn't contain {2}".format(value, label, content) @step('I create label with "{content:w}" in {name:w} data directory') def write_label(context, content, name): context.pctl.write_label(name, content) @step('"{name}" key in DCS has {key:w}={value} after {time_limit:d} seconds') def check_member(context, 
name, key, value, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) dcs_value = None while time.time() < max_time: try: response = json.loads(context.dcs_ctl.query(name)) dcs_value = str(response.get(key)) if dcs_value == value: return except Exception: pass time.sleep(1) assert False, "{0} does not have {1}={2} (found {3}) in dcs after {4} seconds".format(name, key, value, dcs_value, time_limit) @step('there is a non empty {key:w} key in DCS after {time_limit:d} seconds') def check_initialize(context, key, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) while time.time() < max_time: try: if context.dcs_ctl.query(key): return except Exception: pass time.sleep(1) assert False, "There is no {0} in dcs after {1} seconds".format(key, time_limit) patroni-3.2.2/features/steps/citus.py000066400000000000000000000121601455170150700176540ustar00rootroot00000000000000import json import time from behave import step, then from dateutil import tz from datetime import datetime from functools import partial from threading import Thread, Event tzutc = tz.tzutc() @step('{name:w} is a leader in a group {group:d} after {time_limit:d} seconds') @then('{name:w} is a leader in a group {group:d} after {time_limit:d} seconds') def is_a_group_leader(context, name, group, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) while (context.dcs_ctl.query("leader", group=group) != name): time.sleep(1) assert time.time() < max_time, "{0} is not a leader in dcs after {1} seconds".format(name, time_limit) @step('"{name}" key in a group {group:d} in DCS has {key:w}={value} after {time_limit:d} seconds') def check_group_member(context, name, group, key, value, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) dcs_value = None response = None while time.time() < max_time: try: response = 
json.loads(context.dcs_ctl.query(name, group=group)) dcs_value = response.get(key) if dcs_value == value: return except Exception: pass time.sleep(1) assert False, ("{0} in a group {1} does not have {2}={3} (found {4}) in dcs" " after {5} seconds").format(name, group, key, value, response, time_limit) @step('I start {name:w} in citus group {group:d}') def start_citus(context, name, group): return context.pctl.start(name, custom_config={"citus": {"database": "postgres", "group": int(group)}}) @step('{name1:w} is registered in the {name2:w} as the {role:w} in group {group:d} after {time_limit:d} seconds') def check_registration(context, name1, name2, role, group, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) worker_port = int(context.pctl.query(name1, "SHOW port").fetchone()[0]) while time.time() < max_time: try: cur = context.pctl.query(name2, "SELECT nodeport, noderole" " FROM pg_catalog.pg_dist_node WHERE groupid = {0}".format(group)) mapping = {r[0]: r[1] for r in cur} if mapping.get(worker_port) == role: return except Exception: pass time.sleep(1) assert False, "Node {0} is not registered in pg_dist_node on the node {1}".format(name1, name2) @step('I create a distributed table on {name:w}') def create_distributed_table(context, name): context.pctl.query(name, 'CREATE TABLE public.d(id int not null)') context.pctl.query(name, "SELECT create_distributed_table('public.d', 'id')") @step('I cleanup a distributed table on {name:w}') def cleanup_distributed_table(context, name): context.pctl.query(name, 'TRUNCATE public.d') def insert_thread(query_func, context): while True: if context.thread_stop_event.is_set(): break context.insert_counter += 1 query_func('INSERT INTO public.d VALUES({0})'.format(context.insert_counter)) context.thread_stop_event.wait(0.01) @step('I start a thread inserting data on {name:w}') def start_insert_thread(context, name): context.thread_stop_event = Event() context.insert_counter = 0 
query_func = partial(context.pctl.query, name) thread_func = partial(insert_thread, query_func, context) context.thread = Thread(target=thread_func) context.thread.daemon = True context.thread.start() @then('a thread is still alive') def thread_is_alive(context): assert context.thread.is_alive(), "Thread is not alive" @step("I stop a thread") def stop_insert_thread(context): context.thread_stop_event.set() context.thread.join(1 * context.timeout_multiplier) assert not context.thread.is_alive(), "Thread is still alive" @step("a distributed table on {name:w} has expected rows") def count_rows(context, name): rows = context.pctl.query(name, "SELECT COUNT(*) FROM public.d").fetchone()[0] assert rows == context.insert_counter, "Distributed table doesn't have expected amount of rows" @step("there is a transaction in progress on {name:w} changing pg_dist_node after {time_limit:d} seconds") def check_transaction(context, name, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) while time.time() < max_time: cur = context.pctl.query(name, "SELECT xact_start FROM pg_stat_activity WHERE pid <> pg_backend_pid()" " AND state = 'idle in transaction' AND query ~ 'citus_update_node'") if cur.rowcount == 1: context.xact_start = cur.fetchone()[0] return time.sleep(1) assert False, f"There is no idle in transaction on {name} updating pg_dist_node after {time_limit} seconds" @step("a transaction finishes in {timeout:d} seconds") def check_transaction_timeout(context, timeout): assert (datetime.now(tzutc) - context.xact_start).seconds >= timeout, \ "a transaction finished earlier than in {0} seconds".format(timeout) patroni-3.2.2/features/steps/custom_bootstrap.py000066400000000000000000000016751455170150700221450ustar00rootroot00000000000000import time from behave import step, then @step('I start {name:w} in a cluster {cluster_name:w} as a clone of {name2:w}') def start_cluster_clone(context, name, cluster_name, name2): 
context.pctl.clone(name2, cluster_name, name) @step('I start {name:w} in a cluster {cluster_name:w} from backup') def start_cluster_from_backup(context, name, cluster_name): context.pctl.bootstrap_from_backup(name, cluster_name) @then('{name:w} is a leader of {cluster_name:w} after {time_limit:d} seconds') def is_a_leader(context, name, cluster_name, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) while (context.dcs_ctl.query("leader", scope=cluster_name) != name): time.sleep(1) assert time.time() < max_time, "{0} is not a leader in dcs after {1} seconds".format(name, time_limit) @step('I do a backup of {name:w}') def do_backup(context, name): context.pctl.backup(name) patroni-3.2.2/features/steps/dcs_failsafe_mode.py000066400000000000000000000006461455170150700221420ustar00rootroot00000000000000from behave import step @step('DCS is down') def start_dcs_outage(context): context.dcs_ctl.start_outage() @step('DCS is up') def stop_dcs_outage(context): context.dcs_ctl.stop_outage() @step('I start {name:w} in a cluster {cluster_name:w} from backup with no_leader') def start_cluster_from_backup_no_leader(context, name, cluster_name): context.pctl.bootstrap_from_backup_no_leader(name, cluster_name) patroni-3.2.2/features/steps/patroni_api.py000066400000000000000000000147711455170150700210440ustar00rootroot00000000000000import json import parse import shlex import subprocess import sys import time import yaml from behave import register_type, step, then from dateutil import tz from datetime import datetime, timedelta tzutc = tz.tzutc() @parse.with_pattern(r'https?://(?:\w|\.|:|/)+') def parse_url(text): return text register_type(url=parse_url) # there is no way we can find out if the node has already # started as a leader without checking the DCS. We cannot # just rely on the database availability, since there is # a short gap between the time PostgreSQL becomes available # and Patroni assuming the leader role. 
@step('{name:w} is a leader after {time_limit:d} seconds') @then('{name:w} is a leader after {time_limit:d} seconds') def is_a_leader(context, name, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) while (context.dcs_ctl.query("leader") != name): time.sleep(1) assert time.time() < max_time, "{0} is not a leader in dcs after {1} seconds".format(name, time_limit) @step('I sleep for {value:d} seconds') def sleep_for_n_seconds(context, value): time.sleep(int(value)) def _set_response(context, response): context.status_code = response.status data = response.data.decode('utf-8') ct = response.getheader('content-type', '') if ct.startswith('application/json') or\ ct.startswith('text/yaml') or\ ct.startswith('text/x-yaml') or\ ct.startswith('application/yaml') or\ ct.startswith('application/x-yaml'): try: context.response = yaml.safe_load(data) except ValueError: context.response = data else: context.response = data @step('I issue a GET request to {url:url}') def do_get(context, url): do_request(context, 'GET', url, None) @step('I issue an empty POST request to {url:url}') def do_post_empty(context, url): do_request(context, 'POST', url, None) @step('I issue a {request_method:w} request to {url:url} with {data}') def do_request(context, request_method, url, data): if context.certfile: url = url.replace('http://', 'https://') data = data and json.loads(data) try: r = context.request_executor.request(request_method, url, data) if request_method == 'PATCH' and r.status == 409: r = context.request_executor.request(request_method, url, data) except Exception: context.status_code = context.response = None else: _set_response(context, r) @step('I run {cmd}') def do_run(context, cmd): cmd = [sys.executable, '-m', 'coverage', 'run', '--source=patroni', '-p'] + shlex.split(cmd) try: response = subprocess.check_output(cmd, stderr=subprocess.STDOUT) context.status_code = 0 except subprocess.CalledProcessError as e: response = e.output 
context.status_code = e.returncode context.response = response.decode('utf-8').strip() @then('I receive a response {component:w} {data}') def check_response(context, component, data): if component == 'code': assert context.status_code == int(data), \ "status code {0} != {1}, response: {2}".format(context.status_code, data, context.response) elif component == 'returncode': assert context.status_code == int(data), "return code {0} != {1}, {2}".format(context.status_code, data, context.response) elif component == 'text': assert context.response == data.strip('"'), "response {0} does not contain {1}".format(context.response, data) elif component == 'output': assert data.strip('"') in context.response, "response {0} does not contain {1}".format(context.response, data) else: assert component in context.response, "{0} is not part of the response".format(component) if context.certfile: data = data.replace('http://', 'https://') assert str(context.response[component]) == str(data), "{0} does not contain {1}".format(component, data) @step('I issue a scheduled switchover from {from_host:w} to {to_host:w} in {in_seconds:d} seconds') def scheduled_switchover(context, from_host, to_host, in_seconds): context.execute_steps(u""" Given I run patronictl.py switchover batman --master {0} --candidate {1} --scheduled "{2}" --force """.format(from_host, to_host, datetime.now(tzutc) + timedelta(seconds=int(in_seconds)))) @step('I issue a scheduled restart at {url:url} in {in_seconds:d} seconds with {data}') def scheduled_restart(context, url, in_seconds, data): data = data and json.loads(data) or {} data.update(schedule='{0}'.format((datetime.now(tzutc) + timedelta(seconds=int(in_seconds))).isoformat())) context.execute_steps(u"""Given I issue a POST request to {0}/restart with {1}""".format(url, json.dumps(data))) @step('I {action:w} {tag:w} tag in {pg_name:w} config') def add_bool_tag_to_config(context, action, tag, pg_name): value = action == 'set' 
context.pctl.add_tag_to_config(pg_name, tag, value) @step('I add tag {tag:w} {value:w} to {pg_name:w} config') def add_tag_to_config(context, tag, value, pg_name): context.pctl.add_tag_to_config(pg_name, tag, value) @then('Status code on GET {url:url} is {code:d} after {timeout:d} seconds') def check_http_code(context, url, code, timeout): if context.certfile: url = url.replace('http://', 'https://') timeout *= context.timeout_multiplier for _ in range(int(timeout)): r = context.request_executor.request('GET', url) if int(code) == int(r.status): break time.sleep(1) else: assert False, "HTTP Status Code is not {0} after {1} seconds".format(code, timeout) @then('Response on GET {url:url} contains {value} after {timeout:d} seconds') def check_http_response(context, url, value, timeout, negate=False): if context.certfile: url = url.replace('http://', 'https://') timeout *= context.timeout_multiplier for _ in range(int(timeout)): r = context.request_executor.request('GET', url) if (value in r.data.decode('utf-8')) != negate: break time.sleep(1) else: assert False, \ "Value {0} is {1} present in response after {2} seconds".format(value, "not" if not negate else "", timeout) @then('Response on GET {url} does not contain {value} after {timeout:d} seconds') def check_not_in_http_response(context, url, value, timeout): check_http_response(context, url, value, timeout, negate=True) patroni-3.2.2/features/steps/slots.py000066400000000000000000000116361455170150700177000ustar00rootroot00000000000000import json import time from behave import step, then import patroni.psycopg as pg @step('I create a logical replication slot {slot_name} on {pg_name:w} with the {plugin:w} plugin') def create_logical_replication_slot(context, slot_name, pg_name, plugin): try: output = context.pctl.query(pg_name, ("SELECT pg_create_logical_replication_slot('{0}', '{1}')," " current_database()").format(slot_name, plugin)) print(output.fetchone()) except pg.Error as e: print(e) assert False, "Error 
creating slot {0} on {1} with plugin {2}".format(slot_name, pg_name, plugin) @step('{pg_name:w} has a logical replication slot named {slot_name}' ' with the {plugin:w} plugin after {time_limit:d} seconds') @then('{pg_name:w} has a logical replication slot named {slot_name}' ' with the {plugin:w} plugin after {time_limit:d} seconds') def has_logical_replication_slot(context, pg_name, slot_name, plugin, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) while time.time() < max_time: try: row = context.pctl.query(pg_name, ("SELECT slot_type, plugin FROM pg_replication_slots" f" WHERE slot_name = '{slot_name}'")).fetchone() if row: assert row[0] == "logical", f"Replication slot {slot_name} isn't a logical but {row[0]}" assert row[1] == plugin, f"Replication slot {slot_name} using plugin {row[1]} rather than {plugin}" return except Exception: pass time.sleep(1) assert False, f"Error looking for slot {slot_name} on {pg_name} with plugin {plugin}" @step('{pg_name:w} does not have a replication slot named {slot_name:w}') @then('{pg_name:w} does not have a replication slot named {slot_name:w}') def does_not_have_replication_slot(context, pg_name, slot_name): try: row = context.pctl.query(pg_name, ("SELECT 1 FROM pg_replication_slots" " WHERE slot_name = '{0}'").format(slot_name)).fetchone() assert not row, "Found unexpected replication slot named {0}".format(slot_name) except pg.Error: assert False, "Error looking for slot {0} on {1}".format(slot_name, pg_name) @step('{slot_type:w} slot {slot_name:w} is in sync between {pg_name1:w} and {pg_name2:w} after {time_limit:d} seconds') def slots_in_sync(context, slot_type, slot_name, pg_name1, pg_name2, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) column = 'confirmed_flush_lsn' if slot_type.lower() == 'logical' else 'restart_lsn' query = f"SELECT {column} FROM pg_replication_slots WHERE slot_name = '{slot_name}'" while time.time() < 
max_time: try: slot1 = context.pctl.query(pg_name1, query).fetchone() slot2 = context.pctl.query(pg_name2, query).fetchone() if slot1[0] == slot2[0]: return except Exception: pass time.sleep(1) assert False, \ f"{slot_type} slot {slot_name} is not in sync between {pg_name1} and {pg_name2} after {time_limit} seconds" @step('I get all changes from logical slot {slot_name:w} on {pg_name:w}') def logical_slot_get_changes(context, slot_name, pg_name): context.pctl.query(pg_name, "SELECT * FROM pg_logical_slot_get_changes('{0}', NULL, NULL)".format(slot_name)) @step('I get all changes from physical slot {slot_name:w} on {pg_name:w}') def physical_slot_get_changes(context, slot_name, pg_name): context.pctl.query(pg_name, f"SELECT * FROM pg_replication_slot_advance('{slot_name}', pg_current_wal_lsn())") @step('{pg_name:w} has a physical replication slot named {slot_name} after {time_limit:d} seconds') def has_physical_replication_slot(context, pg_name, slot_name, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) query = f"SELECT * FROM pg_catalog.pg_replication_slots WHERE slot_type = 'physical' AND slot_name = '{slot_name}'" while time.time() < max_time: try: row = context.pctl.query(pg_name, query).fetchone() if row: return except Exception: pass time.sleep(1) assert False, f"Physical slot {slot_name} doesn't exist after {time_limit} seconds" @step('"{name}" key in DCS has {subkey:w} in {key:w}') def dcs_key_contains(context, name, subkey, key): response = json.loads(context.dcs_ctl.query(name)) assert key in response and subkey in response[key], f"{name} key in DCS doesn't have {subkey} in {key}" @step('"{name}" key in DCS does not have {subkey:w} in {key:w}') def dcs_key_does_not_contain(context, name, subkey, key): response = json.loads(context.dcs_ctl.query(name)) assert key not in response or subkey not in response[key], f"{name} key in DCS has {subkey} in {key}" 
patroni-3.2.2/features/steps/standby_cluster.py000066400000000000000000000046221455170150700217360ustar00rootroot00000000000000import os import time from behave import step def callbacks(context, name): return {c: '{0} features/callback2.py {1}'.format(context.pctl.PYTHON, name) for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')} @step('I start {name:w} in a cluster {cluster_name:w}') def start_patroni(context, name, cluster_name): return context.pctl.start(name, custom_config={ "scope": cluster_name, "postgresql": { "callbacks": callbacks(context, name), "backup_restore": context.pctl.backup_restore_config() } }) @step('I start {name:w} in a standby cluster {cluster_name:w} as a clone of {name2:w}') def start_patroni_standby_cluster(context, name, cluster_name, name2): # we need to remove patroni.dynamic.json in order to "bootstrap" standby cluster with existing PGDATA os.unlink(os.path.join(context.pctl._processes[name]._data_dir, 'patroni.dynamic.json')) port = context.pctl._processes[name2]._connkwargs.get('port') context.pctl._processes[name].update_config({ "scope": cluster_name, "bootstrap": { "dcs": { "ttl": 20, "loop_wait": 2, "retry_timeout": 5, "synchronous_mode": True, # should be completely ignored "standby_cluster": { "host": "localhost", "port": port, "primary_slot_name": "pm_1", "create_replica_methods": ["backup_restore", "basebackup"] }, "postgresql": {"parameters": {"wal_level": "logical"}} } }, "postgresql": { "callbacks": callbacks(context, name) } }) return context.pctl.start(name) @step('{pg_name1:w} is replicating from {pg_name2:w} after {timeout:d} seconds') def check_replication_status(context, pg_name1, pg_name2, timeout): bound_time = time.time() + timeout * context.timeout_multiplier while time.time() < bound_time: cur = context.pctl.query( pg_name2, "SELECT * FROM pg_catalog.pg_stat_replication WHERE application_name = '{0}'".format(pg_name1), fail_ok=True ) if cur and len(cur.fetchall()) != 0: break time.sleep(1) else: 
assert False, "{0} is not replicating from {1} after {2} seconds".format(pg_name1, pg_name2, timeout) patroni-3.2.2/features/steps/watchdog.py000066400000000000000000000032251455170150700203270ustar00rootroot00000000000000from behave import step, then import time def polling_loop(timeout, interval=1): """Returns an iterator that returns values until timeout has passed. Timeout is measured from start of iteration.""" start_time = time.time() iteration = 0 end_time = start_time + timeout while time.time() < end_time: yield iteration iteration += 1 time.sleep(interval) @step('I start {name:w} with watchdog') def start_patroni_with_watchdog(context, name): return context.pctl.start(name, custom_config={'watchdog': True, 'bootstrap': {'dcs': {'ttl': 20}}}) @step('{name:w} watchdog has been pinged after {timeout:d} seconds') def watchdog_was_pinged(context, name, timeout): for _ in polling_loop(timeout): if context.pctl.get_watchdog(name).was_pinged: return True return False @then('{name:w} watchdog has been closed') def watchdog_was_closed(context, name): assert context.pctl.get_watchdog(name).was_closed @step('{name:w} watchdog has a {timeout:d} second timeout') def watchdog_has_timeout(context, name, timeout): assert context.pctl.get_watchdog(name).timeout == timeout @step('I reset {name:w} watchdog state') def watchdog_reset_pinged(context, name): context.pctl.get_watchdog(name).reset() @then('{name:w} watchdog is triggered after {timeout:d} seconds') def watchdog_was_triggered(context, name, timeout): for _ in polling_loop(timeout): if context.pctl.get_watchdog(name).was_triggered: return True assert False @step('{name:w} hangs for {timeout:d} seconds') def patroni_hang(context, name, timeout): return context.pctl.patroni_hang(name, timeout) patroni-3.2.2/features/watchdog.feature000066400000000000000000000030571455170150700201770ustar00rootroot00000000000000Feature: watchdog Verify that watchdog gets pinged and triggered under appropriate circumstances. 
Scenario: watchdog is opened and pinged Given I start postgres0 with watchdog Then postgres0 is a leader after 10 seconds And postgres0 role is the primary after 10 seconds And postgres0 watchdog has been pinged after 10 seconds And postgres0 watchdog has a 15 second timeout Scenario: watchdog is reconfigured after global ttl changed Given I run patronictl.py edit-config batman -s ttl=30 --force Then I receive a response returncode 0 And I receive a response output "+ttl: 30" When I sleep for 4 seconds Then postgres0 watchdog has a 25 second timeout Scenario: watchdog is disabled during pause Given I run patronictl.py pause batman Then I receive a response returncode 0 When I sleep for 2 seconds Then postgres0 watchdog has been closed Scenario: watchdog is opened and pinged after resume Given I reset postgres0 watchdog state And I run patronictl.py resume batman Then I receive a response returncode 0 And postgres0 watchdog has been pinged after 10 seconds Scenario: watchdog is disabled when shutting down Given I shut down postgres0 Then postgres0 watchdog has been closed Scenario: watchdog is triggered if patroni stops responding Given I reset postgres0 watchdog state And I start postgres0 with watchdog Then postgres0 role is the primary after 10 seconds When postgres0 hangs for 30 seconds Then postgres0 watchdog is triggered after 30 seconds patroni-3.2.2/haproxy.cfg000066400000000000000000000010601455170150700153470ustar00rootroot00000000000000global maxconn 100 defaults log global mode tcp retries 2 timeout client 30m timeout connect 4s timeout server 30m timeout check 5s listen stats mode http bind *:7000 stats enable stats uri / listen batman bind *:5000 option httpchk http-check expect status 200 default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions server postgresql_127.0.0.1_5432 127.0.0.1:5432 maxconn 100 check port 8008 server postgresql_127.0.0.1_5433 127.0.0.1:5433 maxconn 100 check port 8009 
patroni-3.2.2/kubernetes/000077500000000000000000000000001455170150700153465ustar00rootroot00000000000000patroni-3.2.2/kubernetes/Dockerfile000066400000000000000000000026421455170150700173440ustar00rootroot00000000000000FROM postgres:15 LABEL maintainer="Alexander Kukushkin " RUN export DEBIAN_FRONTEND=noninteractive \ && echo 'APT::Install-Recommends "0";\nAPT::Install-Suggests "0";' > /etc/apt/apt.conf.d/01norecommend \ && apt-get update -y \ && apt-cache depends patroni | sed -n -e 's/.* Depends: \(python3-.\+\)$/\1/p' \ | grep -Ev '^python3-(sphinx|etcd|consul|kazoo|kubernetes)' \ | xargs apt-get install -y vim-tiny curl jq locales git python3-pip python3-wheel \ ## Make sure we have a en_US.UTF-8 locale available && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ && pip3 install --break-system-packages setuptools \ && pip3 install --break-system-packages 'git+https://github.com/zalando/patroni.git#egg=patroni[kubernetes]' \ && PGHOME=/home/postgres \ && mkdir -p $PGHOME \ && chown postgres $PGHOME \ && sed -i "s|/var/lib/postgresql.*|$PGHOME:/bin/bash|" /etc/passwd \ # Set permissions for OpenShift && chmod 775 $PGHOME \ && chmod 664 /etc/passwd \ # Clean up && apt-get remove -y git python3-pip python3-wheel \ && apt-get autoremove -y \ && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* /root/.cache COPY entrypoint.sh / EXPOSE 5432 8008 ENV LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 EDITOR=/usr/bin/editor USER postgres WORKDIR /home/postgres CMD ["/bin/bash", "/entrypoint.sh"] patroni-3.2.2/kubernetes/Dockerfile.citus000066400000000000000000000074611455170150700204760ustar00rootroot00000000000000FROM postgres:15 LABEL maintainer="Alexander Kukushkin " RUN export DEBIAN_FRONTEND=noninteractive \ && echo 'APT::Install-Recommends "0";\nAPT::Install-Suggests "0";' > /etc/apt/apt.conf.d/01norecommend \ && apt-get update -y \ && apt-get upgrade -y \ && apt-cache depends patroni | sed -n -e 's/.* Depends: \(python3-.\+\)$/\1/p' \ | grep -Ev 
'^python3-(sphinx|etcd|consul|kazoo|kubernetes)' \ | xargs apt-get install -y busybox vim-tiny curl jq less locales git python3-pip python3-wheel lsb-release \ ## Make sure we have a en_US.UTF-8 locale available && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ && if [ $(dpkg --print-architecture) = 'arm64' ]; then \ apt-get install -y postgresql-server-dev-15 \ gcc make autoconf \ libc6-dev flex libcurl4-gnutls-dev \ libicu-dev libkrb5-dev liblz4-dev \ libpam0g-dev libreadline-dev libselinux1-dev\ libssl-dev libxslt1-dev libzstd-dev uuid-dev \ && git clone -b "main" https://github.com/citusdata/citus.git \ && MAKEFLAGS="-j $(grep -c ^processor /proc/cpuinfo)" \ && cd citus && ./configure && make install && cd ../ && rm -rf /citus; \ else \ echo "deb [signed-by=/etc/apt/trusted.gpg.d/citusdata_community.gpg] https://packagecloud.io/citusdata/community/debian/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/citusdata_community.list \ && curl -sL https://packagecloud.io/citusdata/community/gpgkey | gpg --dearmor > /etc/apt/trusted.gpg.d/citusdata_community.gpg \ && apt-get update -y \ && apt-get -y install postgresql-15-citus-12.0; \ fi \ && pip3 install --break-system-packages setuptools \ && pip3 install --break-system-packages 'git+https://github.com/zalando/patroni.git#egg=patroni[kubernetes]' \ && PGHOME=/home/postgres \ && mkdir -p $PGHOME \ && chown postgres $PGHOME \ && sed -i "s|/var/lib/postgresql.*|$PGHOME:/bin/bash|" /etc/passwd \ && /bin/busybox --install -s \ # Set permissions for OpenShift && chmod 775 $PGHOME \ && chmod 664 /etc/passwd \ # Clean up && apt-get remove -y git python3-pip python3-wheel \ postgresql-server-dev-15 gcc make autoconf \ libc6-dev flex libicu-dev libkrb5-dev liblz4-dev \ libpam0g-dev libreadline-dev libselinux1-dev libssl-dev libxslt1-dev libzstd-dev uuid-dev \ && apt-get autoremove -y \ && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* /root/.cache ADD entrypoint.sh / ENV 
PGSSLMODE=verify-ca PGSSLKEY=/etc/ssl/private/ssl-cert-snakeoil.key PGSSLCERT=/etc/ssl/certs/ssl-cert-snakeoil.pem PGSSLROOTCERT=/etc/ssl/certs/ssl-cert-snakeoil.pem RUN sed -i 's/^postgresql:/&\n basebackup:\n checkpoint: fast/' /entrypoint.sh \ && sed -i "s|^ postgresql:|&\n parameters:\n max_connections: 100\n shared_buffers: 16MB\n ssl: 'on'\n ssl_ca_file: $PGSSLROOTCERT\n ssl_cert_file: $PGSSLCERT\n ssl_key_file: $PGSSLKEY\n citus.node_conninfo: 'sslrootcert=$PGSSLROOTCERT sslkey=$PGSSLKEY sslcert=$PGSSLCERT sslmode=$PGSSLMODE'|" /entrypoint.sh \ && sed -i 's/^ pg_hba:/&\n - local all all trust/' /entrypoint.sh \ && sed -i "s/^\(.*\) \(.*\) \(.*\) \(.*\) \(.*\) md5.*$/\1 hostssl \3 \4 all md5 clientcert=$PGSSLMODE/" /entrypoint.sh \ && sed -i "s#^ \(superuser\|replication\):#&\n sslmode: $PGSSLMODE\n sslkey: $PGSSLKEY\n sslcert: $PGSSLCERT\n sslrootcert: $PGSSLROOTCERT#" /entrypoint.sh EXPOSE 5432 8008 ENV LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 EDITOR=/usr/bin/editor USER postgres WORKDIR /home/postgres CMD ["/bin/bash", "/entrypoint.sh"] patroni-3.2.2/kubernetes/README.md000066400000000000000000000156321455170150700166340ustar00rootroot00000000000000# Kubernetes deployment examples Below you will find examples of Patroni deployments using [kind](https://kind.sigs.k8s.io/). # Patroni on K8s The Patroni cluster deployment with a StatefulSet consisting of three Pods. Example session: $ kind create cluster Creating cluster "kind" ... ✓ Ensuring node image (kindest/node:v1.25.3) 🖼 ✓ Preparing nodes 📦 ✓ Writing configuration 📜 ✓ Starting control-plane ðŸ•¹ï¸ âœ“ Installing CNI 🔌 ✓ Installing StorageClass 💾 Set kubectl context to "kind-kind" You can now use your cluster with: kubectl cluster-info --context kind-kind Thanks for using kind! 😊 $ docker build -t patroni . Sending build context to Docker daemon 138.8kB Step 1/9 : FROM postgres:15 ... 
Successfully built e9bfe69c5d2b Successfully tagged patroni:latest $ kind load docker-image patroni Image: "" with ID "sha256:e9bfe69c5d2b319dec0cf564fb895484537664775e18f37f9b707914cc5537e6" not yet present on node "kind-control-plane", loading... $ kubectl apply -f patroni_k8s.yaml service/patronidemo-config created statefulset.apps/patronidemo created endpoints/patronidemo created service/patronidemo created service/patronidemo-repl created secret/patronidemo created serviceaccount/patronidemo created role.rbac.authorization.k8s.io/patronidemo created rolebinding.rbac.authorization.k8s.io/patronidemo created clusterrole.rbac.authorization.k8s.io/patroni-k8s-ep-access created clusterrolebinding.rbac.authorization.k8s.io/patroni-k8s-ep-access created $ kubectl get pods -L role NAME READY STATUS RESTARTS AGE ROLE patronidemo-0 1/1 Running 0 34s master patronidemo-1 1/1 Running 0 30s replica patronidemo-2 1/1 Running 0 26s replica $ kubectl exec -ti patronidemo-0 -- bash postgres@patronidemo-0:~$ patronictl list + Cluster: patronidemo (7186662553319358497) ----+----+-----------+ | Member | Host | Role | State | TL | Lag in MB | +---------------+------------+---------+---------+----+-----------+ | patronidemo-0 | 10.244.0.5 | Leader | running | 1 | | | patronidemo-1 | 10.244.0.6 | Replica | running | 1 | 0 | | patronidemo-2 | 10.244.0.7 | Replica | running | 1 | 0 | +---------------+------------+---------+---------+----+-----------+ # Citus on K8s The Citus cluster with the StatefulSets, one coordinator with three Pods and two workers with two pods each. Example session: $ kind create cluster Creating cluster "kind" ... ✓ Ensuring node image (kindest/node:v1.25.3) 🖼 ✓ Preparing nodes 📦 ✓ Writing configuration 📜 ✓ Starting control-plane ðŸ•¹ï¸ âœ“ Installing CNI 🔌 ✓ Installing StorageClass 💾 Set kubectl context to "kind-kind" You can now use your cluster with: kubectl cluster-info --context kind-kind Thanks for using kind! 
😊 demo@localhost:~/git/patroni/kubernetes$ docker build -f Dockerfile.citus -t patroni-citus-k8s . Sending build context to Docker daemon 138.8kB Step 1/11 : FROM postgres:15 ... Successfully built 8cd73e325028 Successfully tagged patroni-citus-k8s:latest $ kind load docker-image patroni-citus-k8s Image: "" with ID "sha256:8cd73e325028d7147672494965e53453f5540400928caac0305015eb2c7027c7" not yet present on node "kind-control-plane", loading... $ kubectl apply -f citus_k8s.yaml service/citusdemo-0-config created service/citusdemo-1-config created service/citusdemo-2-config created statefulset.apps/citusdemo-0 created statefulset.apps/citusdemo-1 created statefulset.apps/citusdemo-2 created endpoints/citusdemo-0 created service/citusdemo-0 created endpoints/citusdemo-1 created service/citusdemo-1 created endpoints/citusdemo-2 created service/citusdemo-2 created service/citusdemo-workers created secret/citusdemo created serviceaccount/citusdemo created role.rbac.authorization.k8s.io/citusdemo created rolebinding.rbac.authorization.k8s.io/citusdemo created clusterrole.rbac.authorization.k8s.io/patroni-k8s-ep-access created clusterrolebinding.rbac.authorization.k8s.io/patroni-k8s-ep-access created $ kubectl get sts NAME READY AGE citusdemo-0 1/3 6s # coodinator (group=0) citusdemo-1 1/2 6s # worker (group=1) citusdemo-2 1/2 6s # worker (group=2) $ kubectl get pods -l cluster-name=citusdemo -L role NAME READY STATUS RESTARTS AGE ROLE citusdemo-0-0 1/1 Running 0 105s master citusdemo-0-1 1/1 Running 0 101s replica citusdemo-0-2 1/1 Running 0 96s replica citusdemo-1-0 1/1 Running 0 105s master citusdemo-1-1 1/1 Running 0 101s replica citusdemo-2-0 1/1 Running 0 105s master citusdemo-2-1 1/1 Running 0 101s replica $ kubectl exec -ti citusdemo-0-0 -- bash postgres@citusdemo-0-0:~$ patronictl list + Citus cluster: citusdemo -----------+--------------+---------+----+-----------+ | Group | Member | Host | Role | State | TL | Lag in MB | 
+-------+---------------+-------------+--------------+---------+----+-----------+ | 0 | citusdemo-0-0 | 10.244.0.10 | Leader | running | 1 | | | 0 | citusdemo-0-1 | 10.244.0.12 | Replica | running | 1 | 0 | | 0 | citusdemo-0-2 | 10.244.0.14 | Sync Standby | running | 1 | 0 | | 1 | citusdemo-1-0 | 10.244.0.8 | Leader | running | 1 | | | 1 | citusdemo-1-1 | 10.244.0.11 | Sync Standby | running | 1 | 0 | | 2 | citusdemo-2-0 | 10.244.0.9 | Leader | running | 1 | | | 2 | citusdemo-2-1 | 10.244.0.13 | Sync Standby | running | 1 | 0 | +-------+---------------+-------------+--------------+---------+----+-----------+ postgres@citusdemo-0-0:~$ psql citus psql (15.1 (Debian 15.1-1.pgdg110+1)) Type "help" for help. citus=# table pg_dist_node; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards --------+---------+-------------+----------+----------+-------------+----------+----------+-------------+----------------+------------------ 1 | 0 | 10.244.0.10 | 5432 | default | t | t | primary | default | t | f 2 | 1 | 10.244.0.8 | 5432 | default | t | t | primary | default | t | t 3 | 2 | 10.244.0.9 | 5432 | default | t | t | primary | default | t | t (3 rows) patroni-3.2.2/kubernetes/citus_k8s.yaml000066400000000000000000000327251455170150700201570ustar00rootroot00000000000000# headless services to avoid deletion of citusdemo-*-config endpoints apiVersion: v1 kind: Service metadata: name: citusdemo-0-config labels: application: patroni cluster-name: citusdemo citus-group: '0' spec: clusterIP: None --- apiVersion: v1 kind: Service metadata: name: citusdemo-1-config labels: application: patroni cluster-name: citusdemo citus-group: '1' spec: clusterIP: None --- apiVersion: v1 kind: Service metadata: name: citusdemo-2-config labels: application: patroni cluster-name: citusdemo citus-group: '2' spec: clusterIP: None --- apiVersion: apps/v1 kind: StatefulSet metadata: name: &cluster_name citusdemo-0 
labels: &labels application: patroni cluster-name: citusdemo citus-group: '0' citus-type: coordinator spec: replicas: 3 serviceName: *cluster_name selector: matchLabels: <<: *labels template: metadata: labels: <<: *labels spec: serviceAccountName: citusdemo containers: - name: *cluster_name image: patroni-citus-k8s # docker build -f Dockerfile.citus -t patroni-citus-k8s . imagePullPolicy: IfNotPresent readinessProbe: httpGet: scheme: HTTP path: /readiness port: 8008 initialDelaySeconds: 3 periodSeconds: 10 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 3 ports: - containerPort: 8008 protocol: TCP - containerPort: 5432 protocol: TCP volumeMounts: - mountPath: /home/postgres/pgdata name: pgdata env: - name: PATRONI_KUBERNETES_POD_IP valueFrom: fieldRef: fieldPath: status.podIP - name: PATRONI_KUBERNETES_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: PATRONI_KUBERNETES_BYPASS_API_SERVICE value: 'true' - name: PATRONI_KUBERNETES_USE_ENDPOINTS value: 'true' - name: PATRONI_KUBERNETES_LABELS value: '{application: patroni, cluster-name: citusdemo}' - name: PATRONI_CITUS_DATABASE value: citus - name: PATRONI_CITUS_GROUP value: '0' - name: PATRONI_SUPERUSER_USERNAME value: postgres - name: PATRONI_SUPERUSER_PASSWORD valueFrom: secretKeyRef: name: citusdemo key: superuser-password - name: PATRONI_REPLICATION_USERNAME value: standby - name: PATRONI_REPLICATION_PASSWORD valueFrom: secretKeyRef: name: citusdemo key: replication-password - name: PATRONI_SCOPE value: citusdemo - name: PATRONI_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: PATRONI_POSTGRESQL_DATA_DIR value: /home/postgres/pgdata/pgroot/data - name: PATRONI_POSTGRESQL_PGPASS value: /tmp/pgpass - name: PATRONI_POSTGRESQL_LISTEN value: '0.0.0.0:5432' - name: PATRONI_RESTAPI_LISTEN value: '0.0.0.0:8008' terminationGracePeriodSeconds: 0 volumes: - name: pgdata emptyDir: {} # volumeClaimTemplates: # - metadata: # labels: # application: spilo # spilo-cluster: *cluster_name # 
annotations: # volume.alpha.kubernetes.io/storage-class: anything # name: pgdata # spec: # accessModes: # - ReadWriteOnce # resources: # requests: # storage: 5Gi --- apiVersion: apps/v1 kind: StatefulSet metadata: name: &cluster_name citusdemo-1 labels: &labels application: patroni cluster-name: citusdemo citus-group: '1' citus-type: worker spec: replicas: 2 serviceName: *cluster_name selector: matchLabels: <<: *labels template: metadata: labels: <<: *labels spec: serviceAccountName: citusdemo containers: - name: *cluster_name image: patroni-citus-k8s # docker build -f Dockerfile.citus -t patroni-citus-k8s . imagePullPolicy: IfNotPresent readinessProbe: httpGet: scheme: HTTP path: /readiness port: 8008 initialDelaySeconds: 3 periodSeconds: 10 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 3 ports: - containerPort: 8008 protocol: TCP - containerPort: 5432 protocol: TCP volumeMounts: - mountPath: /home/postgres/pgdata name: pgdata env: - name: PATRONI_KUBERNETES_POD_IP valueFrom: fieldRef: fieldPath: status.podIP - name: PATRONI_KUBERNETES_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: PATRONI_KUBERNETES_BYPASS_API_SERVICE value: 'true' - name: PATRONI_KUBERNETES_USE_ENDPOINTS value: 'true' - name: PATRONI_KUBERNETES_LABELS value: '{application: patroni, cluster-name: citusdemo}' - name: PATRONI_CITUS_DATABASE value: citus - name: PATRONI_CITUS_GROUP value: '1' - name: PATRONI_SUPERUSER_USERNAME value: postgres - name: PATRONI_SUPERUSER_PASSWORD valueFrom: secretKeyRef: name: citusdemo key: superuser-password - name: PATRONI_REPLICATION_USERNAME value: standby - name: PATRONI_REPLICATION_PASSWORD valueFrom: secretKeyRef: name: citusdemo key: replication-password - name: PATRONI_SCOPE value: citusdemo - name: PATRONI_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: PATRONI_POSTGRESQL_DATA_DIR value: /home/postgres/pgdata/pgroot/data - name: PATRONI_POSTGRESQL_PGPASS value: /tmp/pgpass - name: PATRONI_POSTGRESQL_LISTEN value: 
'0.0.0.0:5432' - name: PATRONI_RESTAPI_LISTEN value: '0.0.0.0:8008' terminationGracePeriodSeconds: 0 volumes: - name: pgdata emptyDir: {} # volumeClaimTemplates: # - metadata: # labels: # application: spilo # spilo-cluster: *cluster_name # annotations: # volume.alpha.kubernetes.io/storage-class: anything # name: pgdata # spec: # accessModes: # - ReadWriteOnce # resources: # requests: # storage: 5Gi --- apiVersion: apps/v1 kind: StatefulSet metadata: name: &cluster_name citusdemo-2 labels: &labels application: patroni cluster-name: citusdemo citus-group: '2' citus-type: worker spec: replicas: 2 serviceName: *cluster_name selector: matchLabels: <<: *labels template: metadata: labels: <<: *labels spec: serviceAccountName: citusdemo containers: - name: *cluster_name image: patroni-citus-k8s # docker build -f Dockerfile.citus -t patroni-citus-k8s . imagePullPolicy: IfNotPresent readinessProbe: httpGet: scheme: HTTP path: /readiness port: 8008 initialDelaySeconds: 3 periodSeconds: 10 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 3 ports: - containerPort: 8008 protocol: TCP - containerPort: 5432 protocol: TCP volumeMounts: - mountPath: /home/postgres/pgdata name: pgdata env: - name: PATRONI_KUBERNETES_POD_IP valueFrom: fieldRef: fieldPath: status.podIP - name: PATRONI_KUBERNETES_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: PATRONI_KUBERNETES_BYPASS_API_SERVICE value: 'true' - name: PATRONI_KUBERNETES_USE_ENDPOINTS value: 'true' - name: PATRONI_KUBERNETES_LABELS value: '{application: patroni, cluster-name: citusdemo}' - name: PATRONI_CITUS_DATABASE value: citus - name: PATRONI_CITUS_GROUP value: '2' - name: PATRONI_SUPERUSER_USERNAME value: postgres - name: PATRONI_SUPERUSER_PASSWORD valueFrom: secretKeyRef: name: citusdemo key: superuser-password - name: PATRONI_REPLICATION_USERNAME value: standby - name: PATRONI_REPLICATION_PASSWORD valueFrom: secretKeyRef: name: citusdemo key: replication-password - name: PATRONI_SCOPE value: citusdemo 
- name: PATRONI_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: PATRONI_POSTGRESQL_DATA_DIR value: /home/postgres/pgdata/pgroot/data - name: PATRONI_POSTGRESQL_PGPASS value: /tmp/pgpass - name: PATRONI_POSTGRESQL_LISTEN value: '0.0.0.0:5432' - name: PATRONI_RESTAPI_LISTEN value: '0.0.0.0:8008' terminationGracePeriodSeconds: 0 volumes: - name: pgdata emptyDir: {} # volumeClaimTemplates: # - metadata: # labels: # application: spilo # spilo-cluster: *cluster_name # annotations: # volume.alpha.kubernetes.io/storage-class: anything # name: pgdata # spec: # accessModes: # - ReadWriteOnce # resources: # requests: # storage: 5Gi --- apiVersion: v1 kind: Endpoints metadata: name: citusdemo-0 labels: application: patroni cluster-name: citusdemo citus-group: '0' citus-type: coordinator subsets: [] --- apiVersion: v1 kind: Service metadata: name: citusdemo-0 labels: application: patroni cluster-name: citusdemo citus-group: '0' citus-type: coordinator spec: type: ClusterIP ports: - port: 5432 targetPort: 5432 --- apiVersion: v1 kind: Endpoints metadata: name: citusdemo-1 labels: application: patroni cluster-name: citusdemo citus-group: '1' citus-type: worker subsets: [] --- apiVersion: v1 kind: Service metadata: name: citusdemo-1 labels: application: patroni cluster-name: citusdemo citus-group: '1' citus-type: worker spec: type: ClusterIP ports: - port: 5432 targetPort: 5432 --- apiVersion: v1 kind: Endpoints metadata: name: citusdemo-2 labels: application: patroni cluster-name: citusdemo citus-group: '2' citus-type: worker subsets: [] --- apiVersion: v1 kind: Service metadata: name: citusdemo-2 labels: application: patroni cluster-name: citusdemo citus-group: '2' citus-type: worker spec: type: ClusterIP ports: - port: 5432 targetPort: 5432 --- apiVersion: v1 kind: Service metadata: name: citusdemo-workers labels: &labels application: patroni cluster-name: citusdemo citus-type: worker role: master spec: type: ClusterIP selector: <<: *labels ports: - port: 5432 
targetPort: 5432 --- apiVersion: v1 kind: Secret metadata: name: &cluster_name citusdemo labels: application: patroni cluster-name: *cluster_name type: Opaque data: superuser-password: emFsYW5kbw== replication-password: cmVwLXBhc3M= --- apiVersion: v1 kind: ServiceAccount metadata: name: citusdemo --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: citusdemo rules: - apiGroups: - "" resources: - configmaps verbs: - create - get - list - patch - update - watch # delete and deletecollection are required only for 'patronictl remove' - delete - deletecollection - apiGroups: - "" resources: - endpoints verbs: - get - patch - update # the following three privileges are necessary only when using endpoints - create - list - watch # delete and deletecollection are required only for for 'patronictl remove' - delete - deletecollection - apiGroups: - "" resources: - pods verbs: - get - list - patch - update - watch # The following privilege is only necessary for creation of headless service # for citusdemo-config endpoint, in order to prevent cleaning it up by the # k8s master. 
You can avoid giving this privilege by explicitly creating the # service like it is done in this manifest (lines 2..10) - apiGroups: - "" resources: - services verbs: - create --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: citusdemo roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: citusdemo subjects: - kind: ServiceAccount name: citusdemo # Following privileges are only required if deployed not in the "default" # namespace and you want Patroni to bypass kubernetes service # (PATRONI_KUBERNETES_BYPASS_API_SERVICE=true) --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: patroni-k8s-ep-access rules: - apiGroups: - "" resources: - endpoints resourceNames: - kubernetes verbs: - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: patroni-k8s-ep-access roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: patroni-k8s-ep-access subjects: - kind: ServiceAccount name: citusdemo # The namespace must be specified explicitly. # If deploying to the different namespace you have to change it. 
namespace: default patroni-3.2.2/kubernetes/entrypoint.sh000077500000000000000000000017331455170150700201240ustar00rootroot00000000000000#!/bin/bash if [[ $UID -ge 10000 ]]; then GID=$(id -g) sed -e "s/^postgres:x:[^:]*:[^:]*:/postgres:x:$UID:$GID:/" /etc/passwd > /tmp/passwd cat /tmp/passwd > /etc/passwd rm /tmp/passwd fi cat > /home/postgres/patroni.yml <<__EOF__ bootstrap: dcs: postgresql: use_pg_rewind: true pg_hba: - host all all 0.0.0.0/0 md5 - host replication ${PATRONI_REPLICATION_USERNAME} ${PATRONI_KUBERNETES_POD_IP}/16 md5 initdb: - auth-host: md5 - auth-local: trust - encoding: UTF8 - locale: en_US.UTF-8 - data-checksums restapi: connect_address: '${PATRONI_KUBERNETES_POD_IP}:8008' postgresql: connect_address: '${PATRONI_KUBERNETES_POD_IP}:5432' authentication: superuser: password: '${PATRONI_SUPERUSER_PASSWORD}' replication: password: '${PATRONI_REPLICATION_PASSWORD}' __EOF__ unset PATRONI_SUPERUSER_PASSWORD PATRONI_REPLICATION_PASSWORD exec /usr/bin/python3 /usr/local/bin/patroni /home/postgres/patroni.yml patroni-3.2.2/kubernetes/openshift-example/000077500000000000000000000000001455170150700207765ustar00rootroot00000000000000patroni-3.2.2/kubernetes/openshift-example/README.md000066400000000000000000000025551455170150700222640ustar00rootroot00000000000000# Patroni OpenShift Configuration Patroni can be run in OpenShift. Based on the kubernetes configuration, the Dockerfile and Entrypoint has been modified to support the dynamic UID/GID configuration that is applied in OpenShift. This can be run under the standard `restricted` SCC. # Examples ## Create test project ``` oc new-project patroni-test ``` ## Build the image Note: Update the references when merged upstream. Note: If deploying as a template for multiple users, the following commands should be performed in a shared namespace like `openshift`. 
``` oc import-image postgres:10 --confirm -n openshift oc new-build https://github.com/zalando/patroni --context-dir=kubernetes -n openshift ``` ## Deploy the Image Two configuration templates exist in [templates](templates) directory: - Patroni Ephemeral - Patroni Persistent The only difference is whether or not the statefulset requests persistent storage. ## Create the Template Install the template into the `openshift` namespace if this should be shared across projects: ``` oc create -f templates/template_patroni_ephemeral.yml -n openshift ``` Then, from your own project: ``` oc new-app patroni-pgsql-ephemeral ``` Once the pods are running, two configmaps should be available: ``` $ oc get configmap NAME DATA AGE patroniocp-config 0 1m patroniocp-leader 0 1m ``` patroni-3.2.2/kubernetes/openshift-example/templates/000077500000000000000000000000001455170150700227745ustar00rootroot00000000000000patroni-3.2.2/kubernetes/openshift-example/templates/template_patroni_ephemeral.yml000066400000000000000000000224121455170150700311110ustar00rootroot00000000000000apiVersion: v1 kind: Template metadata: name: patroni-pgsql-ephemeral annotations: description: |- Patroni Postgresql database cluster, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing. iconClass: icon-postgresql openshift.io/display-name: Patroni Postgresql (Ephemeral) openshift.io/long-description: This template deploys a a patroni postgresql HA cluster without persistent storage. 
tags: postgresql objects: - apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${PATRONI_CLUSTER_NAME} spec: ports: - port: 5432 protocol: TCP targetPort: 5432 sessionAffinity: None type: ClusterIP status: loadBalancer: {} - apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${PATRONI_MASTER_SERVICE_NAME} spec: ports: - port: 5432 protocol: TCP targetPort: 5432 selector: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} role: master sessionAffinity: None type: ClusterIP status: loadBalancer: {} - apiVersion: v1 kind: Secret metadata: name: ${PATRONI_CLUSTER_NAME} labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} stringData: superuser-password: ${PATRONI_SUPERUSER_PASSWORD} replication-password: ${PATRONI_REPLICATION_PASSWORD} - apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${PATRONI_REPLICA_SERVICE_NAME} spec: ports: - port: 5432 protocol: TCP targetPort: 5432 selector: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} role: replica sessionAffinity: None type: ClusterIP status: loadBalancer: {} - apiVersion: apps/v1 kind: StatefulSet metadata: creationTimestamp: null generation: 3 labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${APPLICATION_NAME} spec: podManagementPolicy: OrderedReady replicas: 3 revisionHistoryLimit: 10 selector: matchLabels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} serviceName: ${APPLICATION_NAME} template: metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} spec: containers: - env: - name: PATRONI_KUBERNETES_POD_IP valueFrom: fieldRef: apiVersion: v1 
fieldPath: status.podIP - name: PATRONI_KUBERNETES_NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.namespace - name: PATRONI_KUBERNETES_BYPASS_API_SERVICE value: 'true' - name: PATRONI_KUBERNETES_LABELS value: '{application: ${APPLICATION_NAME}, cluster-name: ${PATRONI_CLUSTER_NAME}}' - name: PATRONI_SUPERUSER_USERNAME value: ${PATRONI_SUPERUSER_USERNAME} - name: PATRONI_SUPERUSER_PASSWORD valueFrom: secretKeyRef: key: superuser-password name: ${PATRONI_CLUSTER_NAME} - name: PATRONI_REPLICATION_USERNAME value: ${PATRONI_REPLICATION_USERNAME} - name: PATRONI_REPLICATION_PASSWORD valueFrom: secretKeyRef: key: replication-password name: ${PATRONI_CLUSTER_NAME} - name: PATRONI_SCOPE value: ${PATRONI_CLUSTER_NAME} - name: PATRONI_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: PATRONI_POSTGRESQL_DATA_DIR value: /home/postgres/pgdata/pgroot/data - name: PATRONI_POSTGRESQL_PGPASS value: /tmp/pgpass - name: PATRONI_POSTGRESQL_LISTEN value: 0.0.0.0:5432 - name: PATRONI_RESTAPI_LISTEN value: 0.0.0.0:8008 image: docker-registry.default.svc:5000/${NAMESPACE}/patroni:latest imagePullPolicy: IfNotPresent name: ${APPLICATION_NAME} readinessProbe: httpGet: scheme: HTTP path: /readiness port: 8008 initialDelaySeconds: 3 periodSeconds: 10 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 3 ports: - containerPort: 8008 protocol: TCP - containerPort: 5432 protocol: TCP resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /home/postgres/pgdata name: pgdata dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} serviceAccount: ${SERVICE_ACCOUNT} serviceAccountName: ${SERVICE_ACCOUNT} terminationGracePeriodSeconds: 0 volumes: - name: pgdata emptyDir: {} updateStrategy: type: OnDelete - apiVersion: v1 kind: Endpoints metadata: name: ${APPLICATION_NAME} labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} 
subsets: [] - apiVersion: v1 kind: ServiceAccount metadata: name: ${SERVICE_ACCOUNT} - apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: ${SERVICE_ACCOUNT} rules: - apiGroups: - "" resources: - configmaps verbs: - create - get - list - patch - update - watch # delete is required only for 'patronictl remove' - delete - apiGroups: - "" resources: - endpoints verbs: - get - patch - update # the following three privileges are necessary only when using endpoints - create - list - watch # delete is required only for for 'patronictl remove' - delete - apiGroups: - "" resources: - pods verbs: - get - list - patch - update - watch - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: ${SERVICE_ACCOUNT} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: ${SERVICE_ACCOUNT} subjects: - kind: ServiceAccount name: ${SERVICE_ACCOUNT} # Following privileges are only required if deployed not in the "default" # namespace and you want Patroni to bypass kubernetes service # (PATRONI_KUBERNETES_BYPASS_API_SERVICE=true) - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: patroni-k8s-ep-access rules: - apiGroups: - "" resources: - endpoints resourceNames: - kubernetes verbs: - get - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: ${NAMESPACE}-${SERVICE_ACCOUNT}-k8s-ep-access roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: patroni-k8s-ep-access subjects: - kind: ServiceAccount name: ${SERVICE_ACCOUNT} namespace: ${NAMESPACE} parameters: - description: The name of the application for labelling all artifacts. displayName: Application Name name: APPLICATION_NAME value: patroni-ephemeral - description: The name of the patroni-pgsql cluster. displayName: Cluster Name name: PATRONI_CLUSTER_NAME value: patroni-ephemeral - description: The name of the OpenShift Service exposed for the patroni-ephemeral-master container. displayName: Master service name. 
name: PATRONI_MASTER_SERVICE_NAME value: patroni-ephemeral-master - description: The name of the OpenShift Service exposed for the patroni-ephemeral-replica containers. displayName: Replica service name. name: PATRONI_REPLICA_SERVICE_NAME value: patroni-ephemeral-replica - description: Maximum amount of memory the container can use. displayName: Memory Limit name: MEMORY_LIMIT value: 512Mi - description: The OpenShift Namespace where the patroni and postgresql ImageStream resides. displayName: ImageStream Namespace name: NAMESPACE value: openshift - description: Username of the superuser account for initialization. displayName: Superuser Username name: PATRONI_SUPERUSER_USERNAME value: postgres - description: Password of the superuser account for initialization. displayName: Superuser Passsword name: PATRONI_SUPERUSER_PASSWORD value: postgres - description: Username of the replication account for initialization. displayName: Replication Username name: PATRONI_REPLICATION_USERNAME value: postgres - description: Password of the replication account for initialization. displayName: Repication Passsword name: PATRONI_REPLICATION_PASSWORD value: postgres - description: Service account name used for pods and rolebindings to form a cluster in the project. displayName: Service Account name: SERVICE_ACCOUNT value: patroniocp patroni-3.2.2/kubernetes/openshift-example/templates/template_patroni_persistent.yaml000066400000000000000000000242541455170150700315160ustar00rootroot00000000000000apiVersion: v1 kind: Template metadata: name: patroni-pgsql-persistent annotations: description: |- Patroni Postgresql database cluster, with persistent storage. iconClass: icon-postgresql openshift.io/display-name: Patroni Postgresql (Persistent) openshift.io/long-description: This template deploys a a patroni postgresql HA cluster with persistent storage. 
tags: postgresql objects: - apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${PATRONI_CLUSTER_NAME} spec: ports: - port: 5432 protocol: TCP targetPort: 5432 sessionAffinity: None type: ClusterIP status: loadBalancer: {} - apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${PATRONI_MASTER_SERVICE_NAME} spec: ports: - port: 5432 protocol: TCP targetPort: 5432 selector: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} role: master sessionAffinity: None type: ClusterIP status: loadBalancer: {} - apiVersion: v1 kind: Secret metadata: name: ${PATRONI_CLUSTER_NAME} labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} stringData: superuser-password: ${PATRONI_SUPERUSER_PASSWORD} replication-password: ${PATRONI_REPLICATION_PASSWORD} - apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${PATRONI_REPLICA_SERVICE_NAME} spec: ports: - port: 5432 protocol: TCP targetPort: 5432 selector: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} role: replica sessionAffinity: None type: ClusterIP status: loadBalancer: {} - apiVersion: apps/v1 kind: StatefulSet metadata: creationTimestamp: null generation: 3 labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${APPLICATION_NAME} spec: podManagementPolicy: OrderedReady replicas: 3 revisionHistoryLimit: 10 selector: matchLabels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} serviceName: ${APPLICATION_NAME} template: metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} spec: initContainers: - command: - sh - -c - "mkdir -p /home/postgres/pgdata/pgroot/data && chmod 0700 
/home/postgres/pgdata/pgroot/data" image: docker-registry.default.svc:5000/${NAMESPACE}/patroni:latest imagePullPolicy: IfNotPresent name: fix-perms resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /home/postgres/pgdata name: ${APPLICATION_NAME} containers: - env: - name: PATRONI_KUBERNETES_POD_IP valueFrom: fieldRef: apiVersion: v1 fieldPath: status.podIP - name: PATRONI_KUBERNETES_NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.namespace - name: PATRONI_KUBERNETES_BYPASS_API_SERVICE value: 'true' - name: PATRONI_KUBERNETES_LABELS value: '{application: ${APPLICATION_NAME}, cluster-name: ${PATRONI_CLUSTER_NAME}}' - name: PATRONI_SUPERUSER_USERNAME value: ${PATRONI_SUPERUSER_USERNAME} - name: PATRONI_SUPERUSER_PASSWORD valueFrom: secretKeyRef: key: superuser-password name: ${PATRONI_CLUSTER_NAME} - name: PATRONI_REPLICATION_USERNAME value: ${PATRONI_REPLICATION_USERNAME} - name: PATRONI_REPLICATION_PASSWORD valueFrom: secretKeyRef: key: replication-password name: ${PATRONI_CLUSTER_NAME} - name: PATRONI_SCOPE value: ${PATRONI_CLUSTER_NAME} - name: PATRONI_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: PATRONI_POSTGRESQL_DATA_DIR value: /home/postgres/pgdata/pgroot/data - name: PATRONI_POSTGRESQL_PGPASS value: /tmp/pgpass - name: PATRONI_POSTGRESQL_LISTEN value: 0.0.0.0:5432 - name: PATRONI_RESTAPI_LISTEN value: 0.0.0.0:8008 image: docker-registry.default.svc:5000/${NAMESPACE}/patroni:latest imagePullPolicy: IfNotPresent name: ${APPLICATION_NAME} readinessProbe: httpGet: scheme: HTTP path: /readiness port: 8008 initialDelaySeconds: 3 periodSeconds: 10 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 3 ports: - containerPort: 8008 protocol: TCP - containerPort: 5432 protocol: TCP resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /home/postgres/pgdata name: ${APPLICATION_NAME} 
dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} serviceAccount: ${SERVICE_ACCOUNT} serviceAccountName: ${SERVICE_ACCOUNT} terminationGracePeriodSeconds: 0 volumes: - name: ${APPLICATION_NAME} persistentVolumeClaim: claimName: ${APPLICATION_NAME} volumeClaimTemplates: - metadata: labels: application: ${APPLICATION_NAME} name: ${APPLICATION_NAME} spec: accessModes: - ReadWriteOnce resources: requests: storage: ${PVC_SIZE} updateStrategy: type: OnDelete - apiVersion: v1 kind: Endpoints metadata: name: ${APPLICATION_NAME} labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} subsets: [] - apiVersion: v1 kind: ServiceAccount metadata: name: ${SERVICE_ACCOUNT} - apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: ${SERVICE_ACCOUNT} rules: - apiGroups: - "" resources: - configmaps verbs: - create - get - list - patch - update - watch # delete is required only for 'patronictl remove' - delete - apiGroups: - "" resources: - endpoints verbs: - get - patch - update # the following three privileges are necessary only when using endpoints - create - list - watch # delete is required only for for 'patronictl remove' - delete - apiGroups: - "" resources: - pods verbs: - get - list - patch - update - watch - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: ${SERVICE_ACCOUNT} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: ${SERVICE_ACCOUNT} subjects: - kind: ServiceAccount name: ${SERVICE_ACCOUNT} # Following privileges are only required if deployed not in the "default" # namespace and you want Patroni to bypass kubernetes service # (PATRONI_KUBERNETES_BYPASS_API_SERVICE=true) - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: patroni-k8s-ep-access rules: - apiGroups: - "" resources: - endpoints resourceNames: - kubernetes verbs: - get - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: 
${NAMESPACE}-${SERVICE_ACCOUNT}-k8s-ep-access roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: patroni-k8s-ep-access subjects: - kind: ServiceAccount name: ${SERVICE_ACCOUNT} namespace: ${NAMESPACE} parameters: - description: The name of the application for labelling all artifacts. displayName: Application Name name: APPLICATION_NAME value: patroni-persistent - description: The name of the patroni-pgsql cluster. displayName: Cluster Name name: PATRONI_CLUSTER_NAME value: patroni-persistent - description: The name of the OpenShift Service exposed for the patroni-persistent-master container. displayName: Master service name. name: PATRONI_MASTER_SERVICE_NAME value: patroni-persistent-master - description: The name of the OpenShift Service exposed for the patroni-persistent-replica containers. displayName: Replica service name. name: PATRONI_REPLICA_SERVICE_NAME value: patroni-persistent-replica - description: Maximum amount of memory the container can use. displayName: Memory Limit name: MEMORY_LIMIT value: 512Mi - description: The OpenShift Namespace where the patroni and postgresql ImageStream resides. displayName: ImageStream Namespace name: NAMESPACE value: openshift - description: Username of the superuser account for initialization. displayName: Superuser Username name: PATRONI_SUPERUSER_USERNAME value: postgres - description: Password of the superuser account for initialization. displayName: Superuser Passsword name: PATRONI_SUPERUSER_PASSWORD value: postgres - description: Username of the replication account for initialization. displayName: Replication Username name: PATRONI_REPLICATION_USERNAME value: postgres - description: Password of the replication account for initialization. displayName: Repication Passsword name: PATRONI_REPLICATION_PASSWORD value: postgres - description: Service account name used for pods and rolebindings to form a cluster in the project. 
displayName: Service Account name: SERVICE_ACCOUNT value: patroni-persistent - description: The size of the persistent volume to create. displayName: Persistent Volume Size name: PVC_SIZE value: 5Gi patroni-3.2.2/kubernetes/openshift-example/test/000077500000000000000000000000001455170150700217555ustar00rootroot00000000000000patroni-3.2.2/kubernetes/openshift-example/test/Jenkinsfile000066400000000000000000000023601455170150700241420ustar00rootroot00000000000000pipeline { agent any stages { stage ('Deploy test pod'){ when { expression { openshift.withCluster() { openshift.withProject() { return !openshift.selector( "dc", "pgbench" ).exists() } } } } steps { script { openshift.withCluster() { openshift.withProject() { def pgbench = openshift.newApp( "https://github.com/stewartshea/docker-pgbench/", "--name=pgbench", "-e PGPASSWORD=postgres", "-e PGUSER=postgres", "-e PGHOST=patroni-persistent-master", "-e PGDATABASE=postgres", "-e TEST_CLIENT_COUNT=20", "-e TEST_DURATION=120" ) def pgbenchdc = openshift.selector( "dc", "pgbench" ) timeout(5) { pgbenchdc.rollout().status() } } } } } } stage ('Run benchmark Test'){ steps { sh ''' oc exec $(oc get pods -l app=pgbench | grep Running | awk '{print $1}') ./test.sh ''' } } stage ('Clean up pgtest pod'){ steps { sh ''' oc delete all -l app=pgbench ''' } } } } patroni-3.2.2/kubernetes/openshift-example/test/README.md000066400000000000000000000002701455170150700232330ustar00rootroot00000000000000# Jenkins Test This pipeline test will create a separate deployment config for a pgbench pod and execute a test against the patroni cluster. This is a sample and should be customized. 
patroni-3.2.2/kubernetes/patroni_k8s.yaml000066400000000000000000000142061455170150700204760ustar00rootroot00000000000000# headless service to avoid deletion of patronidemo-config endpoint apiVersion: v1 kind: Service metadata: name: patronidemo-config labels: application: patroni cluster-name: patronidemo spec: clusterIP: None --- apiVersion: apps/v1 kind: StatefulSet metadata: name: &cluster_name patronidemo labels: application: patroni cluster-name: *cluster_name spec: replicas: 3 serviceName: *cluster_name selector: matchLabels: application: patroni cluster-name: *cluster_name template: metadata: labels: application: patroni cluster-name: *cluster_name spec: serviceAccountName: patronidemo containers: - name: *cluster_name image: patroni # docker build -t patroni . imagePullPolicy: IfNotPresent readinessProbe: httpGet: scheme: HTTP path: /readiness port: 8008 initialDelaySeconds: 3 periodSeconds: 10 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 3 ports: - containerPort: 8008 protocol: TCP - containerPort: 5432 protocol: TCP volumeMounts: - mountPath: /home/postgres/pgdata name: pgdata env: - name: PATRONI_KUBERNETES_POD_IP valueFrom: fieldRef: fieldPath: status.podIP - name: PATRONI_KUBERNETES_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: PATRONI_KUBERNETES_BYPASS_API_SERVICE value: 'true' - name: PATRONI_KUBERNETES_USE_ENDPOINTS value: 'true' - name: PATRONI_KUBERNETES_LABELS value: '{application: patroni, cluster-name: patronidemo}' - name: PATRONI_SUPERUSER_USERNAME value: postgres - name: PATRONI_SUPERUSER_PASSWORD valueFrom: secretKeyRef: name: *cluster_name key: superuser-password - name: PATRONI_REPLICATION_USERNAME value: standby - name: PATRONI_REPLICATION_PASSWORD valueFrom: secretKeyRef: name: *cluster_name key: replication-password - name: PATRONI_SCOPE value: *cluster_name - name: PATRONI_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: PATRONI_POSTGRESQL_DATA_DIR value: /home/postgres/pgdata/pgroot/data - 
name: PATRONI_POSTGRESQL_PGPASS value: /tmp/pgpass - name: PATRONI_POSTGRESQL_LISTEN value: '0.0.0.0:5432' - name: PATRONI_RESTAPI_LISTEN value: '0.0.0.0:8008' terminationGracePeriodSeconds: 0 volumes: - name: pgdata emptyDir: {} # volumeClaimTemplates: # - metadata: # labels: # application: spilo # spilo-cluster: *cluster_name # annotations: # volume.alpha.kubernetes.io/storage-class: anything # name: pgdata # spec: # accessModes: # - ReadWriteOnce # resources: # requests: # storage: 5Gi --- apiVersion: v1 kind: Endpoints metadata: name: &cluster_name patronidemo labels: application: patroni cluster-name: *cluster_name subsets: [] --- apiVersion: v1 kind: Service metadata: name: &cluster_name patronidemo labels: application: patroni cluster-name: *cluster_name spec: type: ClusterIP ports: - port: 5432 targetPort: 5432 --- apiVersion: v1 kind: Service metadata: name: patronidemo-repl labels: application: patroni cluster-name: &cluster_name patronidemo role: replica spec: type: ClusterIP selector: application: patroni cluster-name: *cluster_name role: replica ports: - port: 5432 targetPort: 5432 --- apiVersion: v1 kind: Secret metadata: name: &cluster_name patronidemo labels: application: patroni cluster-name: *cluster_name type: Opaque data: superuser-password: emFsYW5kbw== replication-password: cmVwLXBhc3M= --- apiVersion: v1 kind: ServiceAccount metadata: name: patronidemo --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: patronidemo rules: - apiGroups: - "" resources: - configmaps verbs: - create - get - list - patch - update - watch # delete and deletecollection are required only for 'patronictl remove' - delete - deletecollection - apiGroups: - "" resources: - endpoints verbs: - get - patch - update # the following three privileges are necessary only when using endpoints - create - list - watch # delete and deletecollection are required only for for 'patronictl remove' - delete - deletecollection - apiGroups: - "" resources: - pods verbs: 
- get - list - patch - update - watch # The following privilege is only necessary for creation of headless service # for patronidemo-config endpoint, in order to prevent cleaning it up by the # k8s master. You can avoid giving this privilege by explicitly creating the # service like it is done in this manifest (lines 2..10) - apiGroups: - "" resources: - services verbs: - create --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: patronidemo roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: patronidemo subjects: - kind: ServiceAccount name: patronidemo # Following privileges are only required if deployed not in the "default" # namespace and you want Patroni to bypass kubernetes service # (PATRONI_KUBERNETES_BYPASS_API_SERVICE=true) --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: patroni-k8s-ep-access rules: - apiGroups: - "" resources: - endpoints resourceNames: - kubernetes verbs: - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: patroni-k8s-ep-access roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: patroni-k8s-ep-access subjects: - kind: ServiceAccount name: patronidemo # The namespace must be specified explicitly. # If deploying to the different namespace you have to change it. 
namespace: default patroni-3.2.2/mkbinary.sh000077500000000000000000000001361455170150700153520ustar00rootroot00000000000000#!/bin/sh set -e pip install --ignore-installed pyinstaller pyinstaller --clean patroni.spec patroni-3.2.2/patroni.py000077500000000000000000000001401455170150700152230ustar00rootroot00000000000000#!/usr/bin/env python from patroni.__main__ import main if __name__ == '__main__': main() patroni-3.2.2/patroni.spec000066400000000000000000000020411455170150700155240ustar00rootroot00000000000000# -*- mode: python -*- block_cipher = None def hiddenimports(): import sys sys.path.insert(0, '.') try: import patroni.dcs return patroni.dcs.dcs_modules() + ['http.server'] finally: sys.path.pop(0) a = Analysis(['patroni/__main__.py'], pathex=[], binaries=None, datas=[ ('patroni/postgresql/available_parameters/*.yml', 'patroni/postgresql/available_parameters'), ('patroni/postgresql/available_parameters/*.yaml', 'patroni/postgresql/available_parameters'), ], hiddenimports=hiddenimports(), hookspath=[], runtime_hooks=[], excludes=[], win_no_prefer_redirects=False, win_private_assemblies=False, cipher=block_cipher) pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) exe = EXE(pyz, a.scripts, a.binaries, a.zipfiles, a.datas, name='patroni', debug=False, strip=False, upx=True, console=True) patroni-3.2.2/patroni/000077500000000000000000000000001455170150700146535ustar00rootroot00000000000000patroni-3.2.2/patroni/__init__.py000066400000000000000000000031201455170150700167600ustar00rootroot00000000000000"""Define general variables and functions for :mod:`patroni`. :var PATRONI_ENV_PREFIX: prefix for Patroni related configuration environment variables. :var KUBERNETES_ENV_PREFIX: prefix for Kubernetes related configuration environment variables. :var MIN_PSYCOPG2: minimum version of :mod:`psycopg2` required by Patroni to work. :var MIN_PSYCOPG3: minimum version of :mod:`psycopg` required by Patroni to work. 
""" from typing import Iterator, Tuple PATRONI_ENV_PREFIX = 'PATRONI_' KUBERNETES_ENV_PREFIX = 'KUBERNETES_' MIN_PSYCOPG2 = (2, 5, 4) MIN_PSYCOPG3 = (3, 0, 0) def parse_version(version: str) -> Tuple[int, ...]: """Convert *version* from human-readable format to tuple of integers. .. note:: Designed for easy comparison of software versions in Python. :param version: human-readable software version, e.g. ``2.5.4.dev1 (dt dec pq3 ext lo64)``. :returns: tuple of *version* parts, each part as an integer. :Example: >>> parse_version('2.5.4.dev1 (dt dec pq3 ext lo64)') (2, 5, 4) """ def _parse_version(version: str) -> Iterator[int]: """Yield each part of a human-readable version string as an integer. :param version: human-readable software version, e.g. ``2.5.4.dev1``. :yields: each part of *version* as an integer. :Example: >>> tuple(_parse_version('2.5.4.dev1')) (2, 5, 4) """ for e in version.split('.'): try: yield int(e) except ValueError: break return tuple(_parse_version(version.split(' ')[0])) patroni-3.2.2/patroni/__main__.py000066400000000000000000000355361455170150700167610ustar00rootroot00000000000000"""Patroni main entry point. Implement ``patroni`` main daemon and expose its entry point. """ import logging import os import signal import sys import time from argparse import Namespace from typing import Any, Dict, List, Optional, TYPE_CHECKING from patroni import MIN_PSYCOPG2, MIN_PSYCOPG3, parse_version from patroni.daemon import AbstractPatroniDaemon, abstract_main, get_base_arg_parser from patroni.tags import Tags if TYPE_CHECKING: # pragma: no cover from .config import Config logger = logging.getLogger(__name__) class Patroni(AbstractPatroniDaemon, Tags): """Implement ``patroni`` command daemon. :ivar version: Patroni version. :ivar dcs: DCS object. :ivar watchdog: watchdog handler, if configured to use watchdog. :ivar postgresql: managed Postgres instance. :ivar api: REST API server instance of this node. :ivar request: wrapper for performing HTTP requests. 
:ivar ha: HA handler. :ivar next_run: time when to run the next HA loop cycle. :ivar scheduled_restart: when a restart has been scheduled to occur, if any. In that case, should contain two keys: * ``schedule``: timestamp when restart should occur; * ``postmaster_start_time``: timestamp when Postgres was last started. """ def __init__(self, config: 'Config') -> None: """Create a :class:`Patroni` instance with the given *config*. Get a connection to the DCS, configure watchdog (if required), set up Patroni interface with Postgres, configure the HA loop and bring the REST API up. .. note:: Expected to be instantiated and run through :func:`~patroni.daemon.abstract_main`. :param config: Patroni configuration. """ from patroni.api import RestApiServer from patroni.dcs import get_dcs from patroni.ha import Ha from patroni.postgresql import Postgresql from patroni.request import PatroniRequest from patroni.version import __version__ from patroni.watchdog import Watchdog super(Patroni, self).__init__(config) self.version = __version__ self.dcs = get_dcs(self.config) self.request = PatroniRequest(self.config, True) self.ensure_unique_name() self.watchdog = Watchdog(self.config) self.load_dynamic_configuration() self.postgresql = Postgresql(self.config['postgresql']) self.api = RestApiServer(self, self.config['restapi']) self.ha = Ha(self) self._tags = self._get_tags() self.next_run = time.time() self.scheduled_restart: Dict[str, Any] = {} def load_dynamic_configuration(self) -> None: """Load Patroni dynamic configuration. Load dynamic configuration from the DCS, if `/config` key is available in the DCS, otherwise fall back to ``bootstrap.dcs`` section from the configuration file. If the DCS connection fails returning the exception :class:`~patroni.exceptions.DCSError` an attempt will be remade every 5 seconds. .. note:: This method is called only once, at the time when Patroni is started. 
""" from patroni.exceptions import DCSError while True: try: cluster = self.dcs.get_cluster() if cluster and cluster.config and cluster.config.data: if self.config.set_dynamic_configuration(cluster.config): self.dcs.reload_config(self.config) self.watchdog.reload_config(self.config) elif not self.config.dynamic_configuration and 'bootstrap' in self.config: if self.config.set_dynamic_configuration(self.config['bootstrap']['dcs']): self.dcs.reload_config(self.config) self.watchdog.reload_config(self.config) break except DCSError: logger.warning('Can not get cluster from dcs') time.sleep(5) def ensure_unique_name(self) -> None: """A helper method to prevent splitbrain from operator naming error.""" from patroni.dcs import Member cluster = self.dcs.get_cluster() if not cluster: return member = cluster.get_member(self.config['name'], False) if not isinstance(member, Member): return try: # Silence annoying WARNING: Retrying (...) messages when Patroni is quickly restarted. # At this moment we don't have custom log levels configured and hence shouldn't lose anything useful. self.logger.update_loggers({'urllib3.connectionpool': 'ERROR'}) _ = self.request(member, endpoint="/liveness", timeout=3) logger.fatal("Can't start; there is already a node named '%s' running", self.config['name']) sys.exit(1) except Exception: self.logger.update_loggers({}) def _get_tags(self) -> Dict[str, Any]: """Get tags configured for this node, if any. :returns: a dictionary of tags set for this node. """ return self._filter_tags(self.config.get('tags', {})) def reload_config(self, sighup: bool = False, local: Optional[bool] = False) -> None: """Apply new configuration values for ``patroni`` daemon. Reload: * Cached tags; * Request wrapper configuration; * REST API configuration; * Watchdog configuration; * Postgres configuration; * DCS configuration. :param sighup: if it is related to a SIGHUP signal. :param local: if there has been changes to the local configuration file. 
""" try: super(Patroni, self).reload_config(sighup, local) if local: self._tags = self._get_tags() self.request.reload_config(self.config) if local or sighup and self.api.reload_local_certificate(): self.api.reload_config(self.config['restapi']) self.watchdog.reload_config(self.config) self.postgresql.reload_config(self.config['postgresql'], sighup) self.dcs.reload_config(self.config) except Exception: logger.exception('Failed to reload config_file=%s', self.config.config_file) @property def tags(self) -> Dict[str, Any]: """Tags configured for this node, if any.""" return self._tags def schedule_next_run(self) -> None: """Schedule the next run of the ``patroni`` daemon main loop. Next run is scheduled based on previous run plus value of ``loop_wait`` configuration from DCS. If that has already been exceeded, run the next cycle immediately. """ self.next_run += self.dcs.loop_wait current_time = time.time() nap_time = self.next_run - current_time if nap_time <= 0: self.next_run = current_time # Release the GIL so we don't starve anyone waiting on async_executor lock time.sleep(0.001) # Warn user that Patroni is not keeping up logger.warning("Loop time exceeded, rescheduling immediately.") elif self.ha.watch(nap_time): self.next_run = time.time() def run(self) -> None: """Run ``patroni`` daemon process main loop. Start the REST API and keep running HA cycles every ``loop_wait`` seconds. """ self.api.start() self.next_run = time.time() super(Patroni, self).run() def _run_cycle(self) -> None: """Run a cycle of the ``patroni`` daemon main loop. Run an HA cycle and schedule the next cycle run. If any dynamic configuration change request is detected, apply the change and cache the new dynamic configuration values in ``patroni.dynamic.json`` file under Postgres data directory. 
""" logger.info(self.ha.run_cycle()) if self.dcs.cluster and self.dcs.cluster.config and self.dcs.cluster.config.data \ and self.config.set_dynamic_configuration(self.dcs.cluster.config): self.reload_config() if self.postgresql.role != 'uninitialized': self.config.save_cache() self.schedule_next_run() def _shutdown(self) -> None: """Perform shutdown of ``patroni`` daemon process. Shut down the REST API and the HA handler. """ try: self.api.shutdown() except Exception: logger.exception('Exception during RestApi.shutdown') try: self.ha.shutdown() except Exception: logger.exception('Exception during Ha.shutdown') def patroni_main(configfile: str) -> None: """Configure and start ``patroni`` main daemon process. :param configfile: path to Patroni configuration file. """ abstract_main(Patroni, configfile) def process_arguments() -> Namespace: """Process command-line arguments. Create a basic command-line parser through :func:`~patroni.daemon.get_base_arg_parser`, extend its capabilities by adding these flags and parse command-line arguments.: * ``--validate-config`` -- used to validate the Patroni configuration file * ``--generate-config`` -- used to generate Patroni configuration from a running PostgreSQL instance * ``--generate-sample-config`` -- used to generate a sample Patroni configuration .. note:: If running with ``--generate-config``, ``--generate-sample-config`` or ``--validate-flag`` will exit after generating or validating configuration. :returns: parsed arguments, if not running with ``--validate-config`` flag. 
""" from patroni.config_generator import generate_config parser = get_base_arg_parser() group = parser.add_mutually_exclusive_group() group.add_argument('--validate-config', action='store_true', help='Run config validator and exit') group.add_argument('--generate-sample-config', action='store_true', help='Generate a sample Patroni yaml configuration file') group.add_argument('--generate-config', action='store_true', help='Generate a Patroni yaml configuration file for a running instance') parser.add_argument('--dsn', help='Optional DSN string of the instance to be used as a source \ for config generation. Superuser connection is required.') args = parser.parse_args() if args.generate_sample_config: generate_config(args.configfile, True, None) sys.exit(0) elif args.generate_config: generate_config(args.configfile, False, args.dsn) sys.exit(0) elif args.validate_config: from patroni.validator import schema from patroni.config import Config, ConfigParseError try: Config(args.configfile, validator=schema) sys.exit() except ConfigParseError as e: sys.exit(e.value) return args def check_psycopg() -> None: """Ensure at least one among :mod:`psycopg2` or :mod:`psycopg` libraries are available in the environment. .. note:: Patroni chooses :mod:`psycopg2` over :mod:`psycopg`, if possible. If nothing meeting the requirements is found, then exit with a fatal message. 
""" min_psycopg2_str = '.'.join(map(str, MIN_PSYCOPG2)) min_psycopg3_str = '.'.join(map(str, MIN_PSYCOPG3)) available_versions: List[str] = [] # try psycopg2 try: from psycopg2 import __version__ if parse_version(__version__) >= MIN_PSYCOPG2: return available_versions.append('psycopg2=={0}'.format(__version__.split(' ')[0])) except ImportError: logger.debug('psycopg2 module is not available') # try psycopg3 try: from psycopg import __version__ if parse_version(__version__) >= MIN_PSYCOPG3: return available_versions.append('psycopg=={0}'.format(__version__.split(' ')[0])) except ImportError: logger.debug('psycopg module is not available') error = f'FATAL: Patroni requires psycopg2>={min_psycopg2_str}, psycopg2-binary, or psycopg>={min_psycopg3_str}' if available_versions: error += ', but only {0} {1} available'.format( ' and '.join(available_versions), 'is' if len(available_versions) == 1 else 'are') sys.exit(error) def main() -> None: """Main entrypoint of :mod:`patroni.__main__`. Process command-line arguments, ensure :mod:`psycopg2` (or :mod:`psycopg`) attendee the pre-requisites and start ``patroni`` daemon process. .. note:: If running through a Docker container, make the main process take care of init process duties and run ``patroni`` daemon as another process. In that case relevant signals received by the main process and forwarded to ``patroni`` daemon process. """ from multiprocessing import freeze_support # Executables created by PyInstaller are frozen, thus we need to enable frozen support for # :mod:`multiprocessing` to avoid :class:`RuntimeError` exceptions. 
freeze_support() check_psycopg() args = process_arguments() if os.getpid() != 1: return patroni_main(args.configfile) # Patroni started with PID=1, it looks like we are in the container from types import FrameType pid = 0 # Looks like we are in a docker, so we will act like init def sigchld_handler(signo: int, stack_frame: Optional[FrameType]) -> None: """Handle ``SIGCHLD`` received by main process from ``patroni`` daemon when the daemon terminates. :param signo: signal number. :param stack_frame: current stack frame. """ try: # log exit code of all children processes, and break loop when there is none left while True: ret = os.waitpid(-1, os.WNOHANG) if ret == (0, 0): break elif ret[0] != pid: logger.info('Reaped pid=%s, exit status=%s', *ret) except OSError: pass def passtochild(signo: int, stack_frame: Optional[FrameType]) -> None: """Forward a signal *signo* from main process to child process. :param signo: signal number. :param stack_frame: current stack frame. """ if pid: os.kill(pid, signo) if os.name != 'nt': signal.signal(signal.SIGCHLD, sigchld_handler) signal.signal(signal.SIGHUP, passtochild) signal.signal(signal.SIGQUIT, passtochild) signal.signal(signal.SIGUSR1, passtochild) signal.signal(signal.SIGUSR2, passtochild) signal.signal(signal.SIGINT, passtochild) signal.signal(signal.SIGABRT, passtochild) signal.signal(signal.SIGTERM, passtochild) import multiprocessing patroni = multiprocessing.Process(target=patroni_main, args=(args.configfile,)) patroni.start() pid = patroni.pid patroni.join() if __name__ == '__main__': main() patroni-3.2.2/patroni/api.py000066400000000000000000002443521455170150700160100ustar00rootroot00000000000000"""Implement Patroni's REST API. Exposes a REST API of patroni operations functions, such as status, performance and management to web clients. Much of what can be achieved with the command line tool patronictl can be done via the API. Patroni CLI and daemon utilises the API to perform these functions. 
""" import base64 import hmac import json import logging import time import traceback import dateutil.parser import datetime import os import socket import sys from http.server import BaseHTTPRequestHandler, HTTPServer from ipaddress import ip_address, ip_network, IPv4Network, IPv6Network from socketserver import ThreadingMixIn from threading import Thread from urllib.parse import urlparse, parse_qs from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, TYPE_CHECKING, Union from . import psycopg from .__main__ import Patroni from .dcs import Cluster from .exceptions import PostgresConnectionException, PostgresException from .postgresql.misc import postgres_version_to_int from .utils import deep_compare, enable_keepalive, parse_bool, patch_config, Retry, \ RetryFailedError, parse_int, split_host_port, tzutc, uri, cluster_as_json logger = logging.getLogger(__name__) def check_access(func: Callable[..., None]) -> Callable[..., None]: """Check the source ip, authorization header, or client certificates. .. note:: The actual logic to check access is implemented through :func:`RestApiServer.check_access`. :param func: function to be decorated. :returns: a decorator that executes *func* only if :func:`RestApiServer.check_access` returns ``True``. :Example: >>> class FooServer: ... def check_access(self, *args, **kwargs): ... print(f'In FooServer: {args[0].__class__.__name__}') ... return True ... >>> class Foo: ... server = FooServer() ... @check_access ... def do_PUT_foo(self): ... print('In do_PUT_foo') >>> f = Foo() >>> f.do_PUT_foo() In FooServer: Foo In do_PUT_foo """ def wrapper(self: 'RestApiHandler', *args: Any, **kwargs: Any) -> None: if self.server.check_access(self): return func(self, *args, **kwargs) return wrapper class RestApiHandler(BaseHTTPRequestHandler): """Define how to handle each of the requests that are made against the REST API server.""" # Comment from pyi stub file. These unions can cause typing errors with IDEs, e.g. 
PyCharm # # Those are technically of types, respectively: # * _RequestType = Union[socket.socket, Tuple[bytes, socket.socket]] # * _AddressType = Tuple[str, int] # But there are some concerns that having unions here would cause # too much inconvenience to people using it (see # https://github.com/python/typeshed/pull/384#issuecomment-234649696) def __init__(self, request: Any, client_address: Any, server: Union['RestApiServer', HTTPServer]) -> None: """Create a :class:`RestApiHandler` instance. .. note:: Currently not different from its superclass :func:`__init__`, and only used so ``pyright`` can understand the type of ``server`` attribute. :param request: client request to be processed. :param client_address: address of the client connection. :param server: HTTP server that received the request. """ if TYPE_CHECKING: # pragma: no cover assert isinstance(server, RestApiServer) super(RestApiHandler, self).__init__(request, client_address, server) self.server: 'RestApiServer' = server # pyright: ignore [reportIncompatibleVariableOverride] self.__start_time: float = 0.0 self.path_query: Dict[str, List[str]] = {} def _write_status_code_only(self, status_code: int) -> None: """Write a response that is composed only of the HTTP status. The response is written with these values separated by space: * HTTP protocol version; * *status_code*; * description of *status_code*. .. note:: This is usually useful for replying to requests from software like HAProxy. :param status_code: HTTP status code. :Example: * ``_write_status_code_only(200)`` would write a response like ``HTTP/1.0 200 OK``. """ message = self.responses[status_code][0] self.wfile.write('{0} {1} {2}\r\n\r\n'.format(self.protocol_version, status_code, message).encode('utf-8')) self.log_request(status_code) def write_response(self, status_code: int, body: str, content_type: str = 'text/html', headers: Optional[Dict[str, str]] = None) -> None: """Write an HTTP response. .. 
note:: Besides ``Content-Type`` header, and the HTTP headers passed through *headers*, this function will also write the HTTP headers defined through ``restapi.http_extra_headers`` and ``restapi.https_extra_headers`` from Patroni configuration. :param status_code: response HTTP status code. :param body: response body. :param content_type: value for ``Content-Type`` HTTP header. :param headers: dictionary of additional HTTP headers to set for the response. Each key is the header name, and the corresponding value is the value for the header in the response. """ # TODO: try-catch ConnectionResetError: [Errno 104] Connection reset by peer and log it in DEBUG level self.send_response(status_code) headers = headers or {} if content_type: headers['Content-Type'] = content_type for name, value in headers.items(): self.send_header(name, value) for name, value in (self.server.http_extra_headers or {}).items(): self.send_header(name, value) self.end_headers() self.wfile.write(body.encode('utf-8')) def _write_json_response(self, status_code: int, response: Any) -> None: """Write an HTTP response with a JSON content type. Call :func:`write_response` with ``content_type`` as ``application/json``. :param status_code: response HTTP status code. :param response: value to be dumped as a JSON string and to be used as the response body. """ self.write_response(status_code, json.dumps(response, default=str), content_type='application/json') def _write_status_response(self, status_code: int, response: Dict[str, Any]) -> None: """Write an HTTP response with Patroni/Postgres status in JSON format. Modifies *response* before sending it to the client. Defines the ``patroni`` key, which is a dictionary that contains the mandatory keys: * ``version``: Patroni version, e.g. ``3.0.2``; * ``scope``: value of ``scope`` setting from Patroni configuration. 
May also add the following optional keys, depending on the status of this Patroni/PostgreSQL node: * ``tags``: tags that were set through Patroni configuration merged with dynamically applied tags; * ``database_system_identifier``: ``Database system identifier`` from ``pg_controldata`` output; * ``pending_restart``: ``True`` if PostgreSQL is pending to be restarted; * ``scheduled_restart``: a dictionary with a single key ``schedule``, which is the timestamp for the scheduled restart; * ``watchdog_failed``: ``True`` if watchdog device is unhealthy; * ``logger_queue_size``: log queue length if it is longer than expected; * ``logger_records_lost``: number of log records that have been lost while the log queue was full. :param status_code: response HTTP status code. :param response: represents the status of the PostgreSQL node, and is used as a basis for the HTTP response. This dictionary is built through :func:`get_postgresql_status`. """ patroni = self.server.patroni tags = patroni.ha.get_effective_tags() if tags: response['tags'] = tags if patroni.postgresql.sysid: response['database_system_identifier'] = patroni.postgresql.sysid if patroni.postgresql.pending_restart: response['pending_restart'] = True response['patroni'] = { 'version': patroni.version, 'scope': patroni.postgresql.scope, 'name': patroni.postgresql.name } if patroni.scheduled_restart: response['scheduled_restart'] = patroni.scheduled_restart.copy() del response['scheduled_restart']['postmaster_start_time'] response['scheduled_restart']['schedule'] = (response['scheduled_restart']['schedule']).isoformat() if not patroni.ha.watchdog.is_healthy: response['watchdog_failed'] = True qsize = patroni.logger.queue_size if qsize > patroni.logger.NORMAL_LOG_QUEUE_SIZE: response['logger_queue_size'] = qsize lost = patroni.logger.records_lost if lost: response['logger_records_lost'] = lost self._write_json_response(status_code, response) def do_GET(self, write_status_code_only: bool = False) -> None: """Process 
all GET requests which can not be routed to other methods. Is used for handling all health-checks requests. E.g. "GET /(primary|replica|sync|async|etc...)". The (optional) query parameters and the HTTP response status depend on the requested path: * ``/``, ``primary``, or ``read-write``: * HTTP status ``200``: if a primary with the leader lock. * ``/standby-leader``: * HTTP status ``200``: if holds the leader lock in a standby cluster. * ``/leader``: * HTTP status ``200``: if holds the leader lock. * ``/replica``: * Query parameters: * ``lag``: only accept replication lag up to ``lag``. Accepts either an :class:`int`, which represents lag in bytes, or a :class:`str` representing lag in human-readable format (e.g. ``10MB``). * Any custom parameter: will attempt to match them against node tags. * HTTP status ``200``: if up and running as a standby and without ``noloadbalance`` tag. * ``/read-only``: * HTTP status ``200``: if up and running and without ``noloadbalance`` tag. * ``/synchronous`` or ``/sync``: * HTTP status ``200``: if up and running as a synchronous standby. * ``/read-only-sync``: * HTTP status ``200``: if up and running as a synchronous standby or primary. * ``/asynchronous``: * Query parameters: * ``lag``: only accept replication lag up to ``lag``. Accepts either an :class:`int`, which represents lag in bytes, or a :class:`str` representing lag in human-readable format (e.g. ``10MB``). * HTTP status ``200``: if up and running as an asynchronous standby. * ``/health``: * HTTP status ``200``: if up and running. .. note:: If not able to honor the query parameter, or not able to match the condition described for HTTP status ``200`` in each path above, then HTTP status will be ``503``. .. note:: Independently of the requested path, if *write_status_code_only* is ``False``, then it always write an HTTP response through :func:`_write_status_response`, with the node status. 
:param write_status_code_only: indicates that instead of a normal HTTP response we should send only the HTTP Status Code and close the connection. Useful when health-checks are executed by HAProxy. """ path = '/primary' if self.path == '/' else self.path response = self.get_postgresql_status() patroni = self.server.patroni cluster = patroni.dcs.cluster global_config = patroni.config.get_global_config(cluster) leader_optime = cluster and cluster.last_lsn or 0 replayed_location = response.get('xlog', {}).get('replayed_location', 0) max_replica_lag = parse_int(self.path_query.get('lag', [sys.maxsize])[0], 'B') if max_replica_lag is None: max_replica_lag = sys.maxsize is_lagging = leader_optime and leader_optime > replayed_location + max_replica_lag replica_status_code = 200 if not patroni.noloadbalance and not is_lagging and \ response.get('role') == 'replica' and response.get('state') == 'running' else 503 if not cluster and response.get('pause'): leader_status_code = 200 if response.get('role') in ('master', 'primary', 'standby_leader') else 503 primary_status_code = 200 if response.get('role') in ('master', 'primary') else 503 standby_leader_status_code = 200 if response.get('role') == 'standby_leader' else 503 elif patroni.ha.is_leader(): leader_status_code = 200 if global_config.is_standby_cluster: primary_status_code = replica_status_code = 503 standby_leader_status_code = 200 if response.get('role') in ('replica', 'standby_leader') else 503 else: primary_status_code = 200 standby_leader_status_code = 503 else: leader_status_code = primary_status_code = standby_leader_status_code = 503 status_code = 503 ignore_tags = False if 'standby_leader' in path or 'standby-leader' in path: status_code = standby_leader_status_code ignore_tags = True elif 'leader' in path: status_code = leader_status_code ignore_tags = True elif 'master' in path or 'primary' in path or 'read-write' in path: status_code = primary_status_code ignore_tags = True elif 'replica' in path: 
status_code = replica_status_code elif 'read-only' in path and 'sync' not in path: status_code = 200 if 200 in (primary_status_code, standby_leader_status_code) else replica_status_code elif 'health' in path: status_code = 200 if response.get('state') == 'running' else 503 elif cluster: # dcs is available is_synchronous = response.get('sync_standby') if path in ('/sync', '/synchronous') and is_synchronous: status_code = replica_status_code elif path in ('/async', '/asynchronous') and not is_synchronous: status_code = replica_status_code elif path in ('/read-only-sync', '/read-only-synchronous'): if 200 in (primary_status_code, standby_leader_status_code): status_code = 200 elif is_synchronous: status_code = replica_status_code # check for user defined tags in query params if not ignore_tags and status_code == 200: qs_tag_prefix = "tag_" for qs_key, qs_value in self.path_query.items(): if not qs_key.startswith(qs_tag_prefix): continue qs_key = qs_key[len(qs_tag_prefix):] qs_value = qs_value[0] instance_tag_value = patroni.tags.get(qs_key) # tag not registered for instance if instance_tag_value is None: status_code = 503 break if not isinstance(instance_tag_value, str): instance_tag_value = str(instance_tag_value).lower() if instance_tag_value != qs_value: status_code = 503 break if write_status_code_only: # when haproxy sends OPTIONS request it reads only status code and nothing more self._write_status_code_only(status_code) else: self._write_status_response(status_code, response) def do_OPTIONS(self) -> None: """Handle an ``OPTIONS`` request. Write a simple HTTP response that represents the current PostgreSQL status. Send only ``200 OK`` or ``503 Service Unavailable`` as a response and nothing more, particularly no headers. """ self.do_GET(write_status_code_only=True) def do_HEAD(self) -> None: """Handle a ``HEAD`` request. Write a simple HTTP response that represents the current PostgreSQL status. 
Send only ``200 OK`` or ``503 Service Unavailable`` as a response and nothing more, particularly no headers. """ self.do_GET(write_status_code_only=True) def do_GET_liveness(self) -> None: """Handle a ``GET`` request to ``/liveness`` path. Write a simple HTTP response with HTTP status: * ``200``: * If the cluster is in maintenance mode; or * If Patroni heartbeat loop is properly running; * ``503``: * if Patroni heartbeat loop last run was more than ``ttl`` setting ago on the primary (or twice the value of ``ttl`` on a replica). """ patroni: Patroni = self.server.patroni is_primary = patroni.postgresql.role in ('master', 'primary') and patroni.postgresql.is_running() # We can tolerate Patroni problems longer on the replica. # On the primary the liveness probe most likely will start failing only after the leader key expired. # It should not be a big problem because replicas will see that the primary is still alive via REST API call. liveness_threshold = patroni.dcs.ttl * (1 if is_primary else 2) # In maintenance mode (pause) we are fine if heartbeat loop stuck. status_code = 200 if patroni.ha.is_paused() or patroni.next_run + liveness_threshold > time.time() else 503 self._write_status_code_only(status_code) def do_GET_readiness(self) -> None: """Handle a ``GET`` request to ``/readiness`` path. Write a simple HTTP response which HTTP status can be: * ``200``: * If this Patroni node holds the DCS leader lock; or * If this PostgreSQL instance is up and running; * ``503``: if none of the previous conditions apply. """ patroni = self.server.patroni if patroni.ha.is_leader(): status_code = 200 elif patroni.postgresql.state == 'running': status_code = 200 if patroni.dcs.cluster else 503 else: status_code = 503 self._write_status_code_only(status_code) def do_GET_patroni(self) -> None: """Handle a ``GET`` request to ``/patroni`` path. Write an HTTP response through :func:`_write_status_response`, with HTTP status ``200`` and the status of Postgres. 
""" response = self.get_postgresql_status(True) self._write_status_response(200, response) def do_GET_cluster(self) -> None: """Handle a ``GET`` request to ``/cluster`` path. Write an HTTP response with JSON content based on the output of :func:`~patroni.utils.cluster_as_json`, with HTTP status ``200`` and the JSON representation of the cluster topology. """ cluster = self.server.patroni.dcs.get_cluster() global_config = self.server.patroni.config.get_global_config(cluster) response = cluster_as_json(cluster, global_config) response['scope'] = self.server.patroni.postgresql.scope self._write_json_response(200, response) def do_GET_history(self) -> None: """Handle a ``GET`` request to ``/history`` path. Write an HTTP response with a JSON content representing the history of events in the cluster, with HTTP status ``200``. The response contains a :class:`list` of failover/switchover events. Each item is a :class:`list` with the following items: * Timeline when the event occurred (class:`int`); * LSN at which the event occurred (class:`int`); * The reason for the event (class:`str`); * Timestamp when the new timeline was created (class:`str`); * Name of the involved Patroni node (class:`str`). """ cluster = self.server.patroni.dcs.cluster or self.server.patroni.dcs.get_cluster() self._write_json_response(200, cluster.history and cluster.history.lines or []) def do_GET_config(self) -> None: """Handle a ``GET`` request to ``/config`` path. Write an HTTP response with a JSON content representing the Patroni configuration that is stored in the DCS, with HTTP status ``200``. If the cluster information is not available in the DCS, then it will respond with no body and HTTP status ``502`` instead. """ cluster = self.server.patroni.dcs.cluster or self.server.patroni.dcs.get_cluster() if cluster.config: self._write_json_response(200, cluster.config.data) else: self.send_error(502) def do_GET_metrics(self) -> None: """Handle a ``GET`` request to ``/metrics`` path. 
Write an HTTP response with plain text content in the format used by Prometheus, with HTTP status ``200``. The response contains the following items: * ``patroni_version``: Patroni version without periods, e.g. ``030002`` for Patroni ``3.0.2``; * ``patroni_postgres_running``: ``1`` if PostgreSQL is running, else ``0``; * ``patroni_postmaster_start_time``: epoch timestamp since Postmaster was started; * ``patroni_master``: ``1`` if this node holds the leader lock, else ``0``; * ``patroni_primary``: same as ``patroni_master``; * ``patroni_xlog_location``: ``pg_wal_lsn_diff(pg_current_wal_flush_lsn(), '0/0')`` if leader, else ``0``; * ``patroni_standby_leader``: ``1`` if standby leader node, else ``0``; * ``patroni_replica``: ``1`` if a replica, else ``0``; * ``patroni_sync_standby``: ``1`` if a sync replica, else ``0``; * ``patroni_xlog_received_location``: ``pg_wal_lsn_diff(pg_last_wal_receive_lsn(), '0/0')``; * ``patroni_xlog_replayed_location``: ``pg_wal_lsn_diff(pg_last_wal_replay_lsn(), '0/0)``; * ``patroni_xlog_replayed_timestamp``: ``pg_last_xact_replay_timestamp``; * ``patroni_xlog_paused``: ``pg_is_wal_replay_paused()``; * ``patroni_postgres_server_version``: Postgres version without periods, e.g. ``150002`` for Postgres ``15.2``; * ``patroni_cluster_unlocked``: ``1`` if no one holds the leader lock, else ``0``; * ``patroni_failsafe_mode_is_active``: ``1`` if ``failsafe_mode`` is currently active, else ``0``; * ``patroni_postgres_timeline``: PostgreSQL timeline based on current WAL file name; * ``patroni_dcs_last_seen``: epoch timestamp when DCS was last contacted successfully; * ``patroni_pending_restart``: ``1`` if this PostgreSQL node is pending a restart, else ``0``; * ``patroni_is_paused``: ``1`` if Patroni is in maintenance node, else ``0``. 
For PostgreSQL v9.6+ the response will also have the following: * ``patroni_postgres_streaming``: 1 if Postgres is streaming from another node, else ``0``; * ``patroni_postgres_in_archive_recovery``: ``1`` if Postgres isn't streaming and there is ``restore_command`` available, else ``0``. """ postgres = self.get_postgresql_status(True) patroni = self.server.patroni epoch = datetime.datetime(1970, 1, 1, tzinfo=tzutc) metrics: List[str] = [] labels = f'{{scope="{patroni.postgresql.scope}",name="{patroni.postgresql.name}"}}' metrics.append("# HELP patroni_version Patroni semver without periods.") metrics.append("# TYPE patroni_version gauge") padded_semver = ''.join([x.zfill(2) for x in patroni.version.split('.')]) # 2.0.2 => 020002 metrics.append("patroni_version{0} {1}".format(labels, padded_semver)) metrics.append("# HELP patroni_postgres_running Value is 1 if Postgres is running, 0 otherwise.") metrics.append("# TYPE patroni_postgres_running gauge") metrics.append("patroni_postgres_running{0} {1}".format(labels, int(postgres['state'] == 'running'))) metrics.append("# HELP patroni_postmaster_start_time Epoch seconds since Postgres started.") metrics.append("# TYPE patroni_postmaster_start_time gauge") postmaster_start_time = postgres.get('postmaster_start_time') postmaster_start_time = (postmaster_start_time - epoch).total_seconds() if postmaster_start_time else 0 metrics.append("patroni_postmaster_start_time{0} {1}".format(labels, postmaster_start_time)) metrics.append("# HELP patroni_master Value is 1 if this node is the leader, 0 otherwise.") metrics.append("# TYPE patroni_master gauge") metrics.append("patroni_master{0} {1}".format(labels, int(postgres['role'] in ('master', 'primary')))) metrics.append("# HELP patroni_primary Value is 1 if this node is the leader, 0 otherwise.") metrics.append("# TYPE patroni_primary gauge") metrics.append("patroni_primary{0} {1}".format(labels, int(postgres['role'] in ('master', 'primary')))) metrics.append("# HELP 
patroni_xlog_location Current location of the Postgres" " transaction log, 0 if this node is not the leader.") metrics.append("# TYPE patroni_xlog_location counter") metrics.append("patroni_xlog_location{0} {1}".format(labels, postgres.get('xlog', {}).get('location', 0))) metrics.append("# HELP patroni_standby_leader Value is 1 if this node is the standby_leader, 0 otherwise.") metrics.append("# TYPE patroni_standby_leader gauge") metrics.append("patroni_standby_leader{0} {1}".format(labels, int(postgres['role'] == 'standby_leader'))) metrics.append("# HELP patroni_replica Value is 1 if this node is a replica, 0 otherwise.") metrics.append("# TYPE patroni_replica gauge") metrics.append("patroni_replica{0} {1}".format(labels, int(postgres['role'] == 'replica'))) metrics.append("# HELP patroni_sync_standby Value is 1 if this node is a sync standby replica, 0 otherwise.") metrics.append("# TYPE patroni_sync_standby gauge") metrics.append("patroni_sync_standby{0} {1}".format(labels, int(postgres.get('sync_standby', False)))) metrics.append("# HELP patroni_xlog_received_location Current location of the received" " Postgres transaction log, 0 if this node is not a replica.") metrics.append("# TYPE patroni_xlog_received_location counter") metrics.append("patroni_xlog_received_location{0} {1}" .format(labels, postgres.get('xlog', {}).get('received_location', 0))) metrics.append("# HELP patroni_xlog_replayed_location Current location of the replayed" " Postgres transaction log, 0 if this node is not a replica.") metrics.append("# TYPE patroni_xlog_replayed_location counter") metrics.append("patroni_xlog_replayed_location{0} {1}" .format(labels, postgres.get('xlog', {}).get('replayed_location', 0))) metrics.append("# HELP patroni_xlog_replayed_timestamp Current timestamp of the replayed" " Postgres transaction log, 0 if null.") metrics.append("# TYPE patroni_xlog_replayed_timestamp gauge") replayed_timestamp = postgres.get('xlog', {}).get('replayed_timestamp') 
replayed_timestamp = (replayed_timestamp - epoch).total_seconds() if replayed_timestamp else 0 metrics.append("patroni_xlog_replayed_timestamp{0} {1}".format(labels, replayed_timestamp)) metrics.append("# HELP patroni_xlog_paused Value is 1 if the Postgres xlog is paused, 0 otherwise.") metrics.append("# TYPE patroni_xlog_paused gauge") metrics.append("patroni_xlog_paused{0} {1}" .format(labels, int(postgres.get('xlog', {}).get('paused', False) is True))) if postgres.get('server_version', 0) >= 90600: metrics.append("# HELP patroni_postgres_streaming Value is 1 if Postgres is streaming, 0 otherwise.") metrics.append("# TYPE patroni_postgres_streaming gauge") metrics.append("patroni_postgres_streaming{0} {1}" .format(labels, int(postgres.get('replication_state') == 'streaming'))) metrics.append("# HELP patroni_postgres_in_archive_recovery Value is 1" " if Postgres is replicating from archive, 0 otherwise.") metrics.append("# TYPE patroni_postgres_in_archive_recovery gauge") metrics.append("patroni_postgres_in_archive_recovery{0} {1}" .format(labels, int(postgres.get('replication_state') == 'in archive recovery'))) metrics.append("# HELP patroni_postgres_server_version Version of Postgres (if running), 0 otherwise.") metrics.append("# TYPE patroni_postgres_server_version gauge") metrics.append("patroni_postgres_server_version {0} {1}".format(labels, postgres.get('server_version', 0))) metrics.append("# HELP patroni_cluster_unlocked Value is 1 if the cluster is unlocked, 0 if locked.") metrics.append("# TYPE patroni_cluster_unlocked gauge") metrics.append("patroni_cluster_unlocked{0} {1}".format(labels, int(postgres.get('cluster_unlocked', 0)))) metrics.append("# HELP patroni_failsafe_mode_is_active Value is 1 if failsafe mode is active, 0 if inactive.") metrics.append("# TYPE patroni_failsafe_mode_is_active gauge") metrics.append("patroni_failsafe_mode_is_active{0} {1}" .format(labels, int(postgres.get('failsafe_mode_is_active', 0)))) metrics.append("# HELP 
patroni_postgres_timeline Postgres timeline of this node (if running), 0 otherwise.") metrics.append("# TYPE patroni_postgres_timeline counter") metrics.append("patroni_postgres_timeline{0} {1}".format(labels, postgres.get('timeline', 0))) metrics.append("# HELP patroni_dcs_last_seen Epoch timestamp when DCS was last contacted successfully" " by Patroni.") metrics.append("# TYPE patroni_dcs_last_seen gauge") metrics.append("patroni_dcs_last_seen{0} {1}".format(labels, postgres.get('dcs_last_seen', 0))) metrics.append("# HELP patroni_pending_restart Value is 1 if the node needs a restart, 0 otherwise.") metrics.append("# TYPE patroni_pending_restart gauge") metrics.append("patroni_pending_restart{0} {1}" .format(labels, int(patroni.postgresql.pending_restart))) metrics.append("# HELP patroni_is_paused Value is 1 if auto failover is disabled, 0 otherwise.") metrics.append("# TYPE patroni_is_paused gauge") metrics.append("patroni_is_paused{0} {1}".format(labels, int(postgres.get('pause', 0)))) self.write_response(200, '\n'.join(metrics) + '\n', content_type='text/plain') def _read_json_content(self, body_is_optional: bool = False) -> Optional[Dict[Any, Any]]: """Read JSON from HTTP request body. .. note:: Retrieves the request body based on `content-length` HTTP header. The body is expected to be a JSON string with that length. If request body is expected but `content-length` HTTP header is absent, then write an HTTP response with HTTP status ``411``. If request body is expected but contains nothing, or if an exception is faced, then write an HTTP response with HTTP status ``400``. :param body_is_optional: if ``False`` then the request must contain a body. If ``True``, then the request may or may not contain a body. :returns: deserialized JSON string from request body, if present. If body is absent, but *body_is_optional* is ``True``, then return an empty dictionary. Returns ``None`` otherwise. 
""" if 'content-length' not in self.headers: return self.send_error(411) if not body_is_optional else {} try: content_length = int(self.headers.get('content-length') or 0) if content_length == 0 and body_is_optional: return {} request: Union[Dict[str, Any], Any] = json.loads(self.rfile.read(content_length).decode('utf-8')) if isinstance(request, dict) and (request or body_is_optional): return request except Exception: logger.exception('Bad request') self.send_error(400) @check_access def do_PATCH_config(self) -> None: """Handle a ``PATCH`` request to ``/config`` path. Updates the Patroni configuration based on the JSON request body, then writes a response with the new configuration, with HTTP status ``200``. .. note:: If the configuration has been previously wiped out from DCS, then write a response with HTTP status ``503``. If applying a configuration value fails, then write a response with HTTP status ``409``. """ request = self._read_json_content() if request: cluster = self.server.patroni.dcs.get_cluster() if not (cluster.config and cluster.config.modify_version): return self.send_error(503) data = cluster.config.data.copy() if patch_config(data, request): value = json.dumps(data, separators=(',', ':')) if not self.server.patroni.dcs.set_config_value(value, cluster.config.version): return self.send_error(409) self.server.patroni.ha.wakeup() self._write_json_response(200, data) @check_access def do_PUT_config(self) -> None: """Handle a ``PUT`` request to ``/config`` path. Overwrites the Patroni configuration based on the JSON request body, then writes a response with the new configuration, with HTTP status ``200``. .. note:: If applying the new configuration fails, then write a response with HTTP status ``502``. 
""" request = self._read_json_content() if request: cluster = self.server.patroni.dcs.get_cluster() if not (cluster.config and deep_compare(request, cluster.config.data)): value = json.dumps(request, separators=(',', ':')) if not self.server.patroni.dcs.set_config_value(value): return self.send_error(502) self._write_json_response(200, request) @check_access def do_POST_reload(self) -> None: """Handle a ``POST`` request to ``/reload`` path. Schedules a reload to Patroni and writes a response with HTTP status ``202``. """ self.server.patroni.sighup_handler() self.write_response(202, 'reload scheduled') def do_GET_failsafe(self) -> None: """Handle a ``GET`` request to ``/failsafe`` path. Writes a response with a JSON string body containing all nodes that are known to Patroni at a given point in time, with HTTP status ``200``. The JSON contains a dictionary, each key is the name of the Patroni node, and the corresponding value is the URI to access `/patroni` path of its REST API. .. note:: If ``failsafe_mode`` is not enabled, then write a response with HTTP status ``502``. """ failsafe = self.server.patroni.dcs.failsafe if isinstance(failsafe, dict): self._write_json_response(200, failsafe) else: self.send_error(502) @check_access def do_POST_failsafe(self) -> None: """Handle a ``POST`` request to ``/failsafe`` path. Writes a response with HTTP status ``200`` if this node is a Standby, or with HTTP status ``500`` if this is the primary. .. note:: If ``failsafe_mode`` is not enabled, then write a response with HTTP status ``502``. """ if self.server.patroni.ha.is_failsafe_mode(): request = self._read_json_content() if request: message = self.server.patroni.ha.update_failsafe(request) or 'Accepted' code = 200 if message == 'Accepted' else 500 self.write_response(code, message) else: self.send_error(502) @check_access def do_POST_sigterm(self) -> None: """Handle a ``POST`` request to ``/sigterm`` path. Schedule a shutdown and write a response with HTTP status ``202``. .. 
note:: Only for behave testing on Windows. """ if os.name == 'nt' and os.getenv('BEHAVE_DEBUG'): self.server.patroni.api_sigterm() self.write_response(202, 'shutdown scheduled') @staticmethod def parse_schedule(schedule: str, action: str) -> Tuple[Union[int, None], Union[str, None], Union[datetime.datetime, None]]: """Parse the given *schedule* and validate it. :param schedule: a string representing a timestamp, e.g. ``2023-04-14T20:27:00+00:00``. :param action: the action to be scheduled (``restart``, ``switchover``, or ``failover``). :returns: a tuple composed of 3 items: * Suggested HTTP status code for a response: * ``None``: if no issue was faced while parsing, leaving it up to the caller to decide the status; or * ``400``: if no timezone information could be found in *schedule*; or * ``422``: if *schedule* is invalid -- in the past or not parsable. * An error message, if any error is faced, otherwise ``None``; * Parsed *schedule*, if able to parse, otherwise ``None``. """ error = None scheduled_at = None try: scheduled_at = dateutil.parser.parse(schedule) if scheduled_at.tzinfo is None: error = 'Timezone information is mandatory for the scheduled {0}'.format(action) status_code = 400 elif scheduled_at < datetime.datetime.now(tzutc): error = 'Cannot schedule {0} in the past'.format(action) status_code = 422 else: status_code = None except (ValueError, TypeError): logger.exception('Invalid scheduled %s time: %s', action, schedule) error = 'Unable to parse scheduled timestamp. It should be in an unambiguous format, e.g. ISO 8601' status_code = 422 return status_code, error, scheduled_at @check_access def do_POST_restart(self) -> None: """Handle a ``POST`` request to ``/restart`` path. Used to restart postgres (or schedule a restart), mainly by ``patronictl restart``. 
The request body should be a JSON dictionary, and it can contain the following keys: * ``schedule``: timestamp at which the restart should occur; * ``role``: restart only nodes which role is ``role``. Can be either: * ``primary`` (or ``master``); or * ``replica``. * ``postgres_version``: restart only nodes which PostgreSQL version is less than ``postgres_version``, e.g. ``15.2``; * ``timeout``: if restart takes longer than ``timeout`` return an error and fail over to a replica; * ``restart_pending``: if we should restart only when have ``pending restart`` flag; Response HTTP status codes: * ``200``: if successfully performed an immediate restart; or * ``202``: if successfully scheduled a restart for later; or * ``500``: if the cluster is in maintenance mode; or * ``400``: if * ``role`` value is invalid; or * ``postgres_version`` value is invalid; or * ``timeout`` is not a number, or lesser than ``0``; or * request contains an unknown key; or * exception is faced while performing an immediate restart. * ``409``: if another restart was already previously scheduled; or * ``503``: if any issue was found while performing an immediate restart; or * HTTP status returned by :func:`parse_schedule`, if any error was observed while parsing the schedule. .. note:: If it's not able to parse the request body, then the request is silently discarded. 
""" status_code = 500 data = 'restart failed' request = self._read_json_content(body_is_optional=True) cluster = self.server.patroni.dcs.get_cluster() if request is None: # failed to parse the json return if request: logger.debug("received restart request: {0}".format(request)) if self.server.patroni.config.get_global_config(cluster).is_paused and 'schedule' in request: self.write_response(status_code, "Can't schedule restart in the paused state") return for k in request: if k == 'schedule': (_, data, request[k]) = self.parse_schedule(request[k], "restart") if _: status_code = _ break elif k == 'role': if request[k] not in ('master', 'primary', 'replica'): status_code = 400 data = "PostgreSQL role should be either primary or replica" break elif k == 'postgres_version': try: postgres_version_to_int(request[k]) except PostgresException as e: status_code = 400 data = e.value break elif k == 'timeout': request[k] = parse_int(request[k], 's') if request[k] is None or request[k] <= 0: status_code = 400 data = "Timeout should be a positive number of seconds" break elif k != 'restart_pending': status_code = 400 data = "Unknown filter for the scheduled restart: {0}".format(k) break else: if 'schedule' not in request: try: status, data = self.server.patroni.ha.restart(request) status_code = 200 if status else 503 except Exception: logger.exception('Exception during restart') status_code = 400 else: if self.server.patroni.ha.schedule_future_restart(request): data = "Restart scheduled" status_code = 202 else: data = "Another restart is already scheduled" status_code = 409 # pyright thinks ``data`` can be ``None`` because ``parse_schedule`` call may return ``None``. However, if # that's the case, ``data`` will be overwritten when the ``for`` loop ends if TYPE_CHECKING: # pragma: no cover assert isinstance(data, str) self.write_response(status_code, data) @check_access def do_DELETE_restart(self) -> None: """Handle a ``DELETE`` request to ``/restart`` path. 
Used to remove a scheduled restart of PostgreSQL. Response HTTP status codes: * ``200``: if a scheduled restart was removed; or * ``404``: if no scheduled restart could be found. """ if self.server.patroni.ha.delete_future_restart(): data = "scheduled restart deleted" code = 200 else: data = "no restarts are scheduled" code = 404 self.write_response(code, data) @check_access def do_DELETE_switchover(self) -> None: """Handle a ``DELETE`` request to ``/switchover`` path. Used to remove a scheduled switchover in the cluster. It writes a response, and the HTTP status code can be: * ``200``: if a scheduled switchover was removed; or * ``404``: if no scheduled switchover could be found; or * ``409``: if not able to update the switchover info in the DCS. """ failover = self.server.patroni.dcs.get_cluster().failover if failover and failover.scheduled_at: if not self.server.patroni.dcs.manual_failover('', '', version=failover.version): return self.send_error(409) else: data = "scheduled switchover deleted" code = 200 else: data = "no switchover is scheduled" code = 404 self.write_response(code, data) @check_access def do_POST_reinitialize(self) -> None: """Handle a ``POST`` request to ``/reinitialize`` path. The request body may contain a JSON dictionary with the following key: * ``force``: ``True`` if we want to cancel an already running task in order to reinit a replica. Response HTTP status codes: * ``200``: if the reinit operation has started; or * ``503``: if any error is returned by :func:`~patroni.ha.Ha.reinitialize`. 
""" request = self._read_json_content(body_is_optional=True) if request: logger.debug('received reinitialize request: %s', request) force = isinstance(request, dict) and parse_bool(request.get('force')) or False data = self.server.patroni.ha.reinitialize(force) if data is None: status_code = 200 data = 'reinitialize started' else: status_code = 503 self.write_response(status_code, data) def poll_failover_result(self, leader: Optional[str], candidate: Optional[str], action: str) -> Tuple[int, str]: """Poll failover/switchover operation until it finishes or times out. :param leader: name of the current Patroni leader. :param candidate: name of the Patroni node to be promoted. :param action: the action that is ongoing (``switchover`` or ``failover``). :returns: a tuple composed of 2 items: * Response HTTP status codes: * ``200``: if the operation succeeded; or * ``503``: if the operation failed or timed out. * A status message about the operation. """ timeout = max(10, self.server.patroni.dcs.loop_wait) for _ in range(0, timeout * 2): time.sleep(1) try: cluster = self.server.patroni.dcs.get_cluster() if not cluster.is_unlocked() and cluster.leader and cluster.leader.name != leader: if not candidate or candidate == cluster.leader.name: return 200, 'Successfully {0}ed over to "{1}"'.format(action[:-4], cluster.leader.name) else: return 200, '{0}ed over to "{1}" instead of "{2}"'.format(action[:-4].title(), cluster.leader.name, candidate) if not cluster.failover: return 503, action.title() + ' failed' except Exception as e: logger.debug('Exception occurred during polling %s result: %s', action, e) return 503, action.title() + ' status unknown' def is_failover_possible(self, cluster: Cluster, leader: Optional[str], candidate: Optional[str], action: str) -> Optional[str]: """Checks whether there are nodes that could take over after demoting the primary. :param cluster: the Patroni cluster. :param leader: name of the current Patroni leader. 
:param candidate: name of the Patroni node to be promoted. :param action: the action to be performed (``switchover`` or ``failover``). :returns: a string with the error message or ``None`` if good nodes are found. """ is_synchronous_mode = self.server.patroni.config.get_global_config(cluster).is_synchronous_mode if leader and (not cluster.leader or cluster.leader.name != leader): return 'leader name does not match' if candidate: if action == 'switchover' and is_synchronous_mode and not cluster.sync.matches(candidate): return 'candidate name does not match with sync_standby' members = [m for m in cluster.members if m.name == candidate] if not members: return 'candidate does not exists' elif is_synchronous_mode: members = [m for m in cluster.members if cluster.sync.matches(m.name)] if not members: return action + ' is not possible: can not find sync_standby' else: members = [m for m in cluster.members if not cluster.leader or m.name != cluster.leader.name and m.api_url] if not members: return action + ' is not possible: cluster does not have members except leader' for st in self.server.patroni.ha.fetch_nodes_statuses(members): if st.failover_limitation() is None: return None return action + ' is not possible: no good candidates have been found' @check_access def do_POST_failover(self, action: str = 'failover') -> None: """Handle a ``POST`` request to ``/failover`` path. Handles manual failovers/switchovers, mainly from ``patronictl``. The request body should be a JSON dictionary, and it can contain the following keys: * ``leader``: name of the current leader in the cluster; * ``candidate``: name of the Patroni node to be promoted; * ``scheduled_at``: a string representing the timestamp when to execute the switchover/failover, e.g. ``2023-04-14T20:27:00+00:00``. 
Response HTTP status codes: * ``202``: if operation has been scheduled; * ``412``: if operation is not possible; * ``503``: if unable to register the operation to the DCS; * HTTP status returned by :func:`parse_schedule`, if any error was observed while parsing the schedule; * HTTP status returned by :func:`poll_failover_result` if the operation has been processed immediately; * ``400``: if none of the above applies. .. note:: If unable to parse the request body, then the request is silently discarded. :param action: the action to be performed (``switchover`` or ``failover``). """ request = self._read_json_content() (status_code, data) = (400, '') if not request: return leader = request.get('leader') candidate = request.get('candidate') or request.get('member') scheduled_at = request.get('scheduled_at') cluster = self.server.patroni.dcs.get_cluster() global_config = self.server.patroni.config.get_global_config(cluster) logger.info("received %s request with leader=%s candidate=%s scheduled_at=%s", action, leader, candidate, scheduled_at) if action == 'failover' and not candidate: data = 'Failover could be performed only to a specific candidate' elif action == 'switchover' and not leader: data = 'Switchover could be performed only from a specific leader' if not data and scheduled_at: if action == 'failover': data = "Failover can't be scheduled" elif global_config.is_paused: data = "Can't schedule switchover in the paused state" else: (status_code, data, scheduled_at) = self.parse_schedule(scheduled_at, action) if not data and global_config.is_paused and not candidate: data = 'Switchover is possible only to a specific candidate in a paused state' if action == 'failover' and leader: logger.warning('received failover request with leader specifed - performing switchover instead') action = 'switchover' if not data and leader == candidate: data = 'Switchover target and source are the same' if not data and not scheduled_at: data = self.is_failover_possible(cluster, leader, 
candidate, action) if data: status_code = 412 if not data: if self.server.patroni.dcs.manual_failover(leader, candidate, scheduled_at=scheduled_at): self.server.patroni.ha.wakeup() if scheduled_at: data = action.title() + ' scheduled' status_code = 202 else: status_code, data = self.poll_failover_result(cluster.leader and cluster.leader.name, candidate, action) else: data = 'failed to write failover key into DCS' status_code = 503 # pyright thinks ``status_code`` can be ``None`` because ``parse_schedule`` call may return ``None``. However, # if that's the case, ``status_code`` will be overwritten somewhere between ``parse_schedule`` and # ``write_response`` calls. if TYPE_CHECKING: # pragma: no cover assert isinstance(status_code, int) self.write_response(status_code, data) def do_POST_switchover(self) -> None: """Handle a ``POST`` request to ``/switchover`` path. Calls :func:`do_POST_failover` with ``switchover`` option. """ self.do_POST_failover(action='switchover') @check_access def do_POST_citus(self) -> None: """Handle a ``POST`` request to ``/citus`` path. Call :func:`~patroni.postgresql.CitusHandler.handle_event` to handle the request, then write a response with HTTP status code ``200``. .. note:: If unable to parse the request body, then the request is silently discarded. """ request = self._read_json_content() if not request: return patroni = self.server.patroni if patroni.postgresql.citus_handler.is_coordinator() and patroni.ha.is_leader(): cluster = patroni.dcs.get_cluster() patroni.postgresql.citus_handler.handle_event(cluster, request) self.write_response(200, 'OK') def parse_request(self) -> bool: """Override :func:`parse_request` to enrich basic functionality of :class:`~http.server.BaseHTTPRequestHandler`. Original class can only invoke :func:`do_GET`, :func:`do_POST`, :func:`do_PUT`, etc method implementations if they are defined. 
But we would like to have at least some simple routing mechanism, i.e.: * ``GET /uri1/part2`` request should invoke :func:`do_GET_uri1()` * ``POST /other`` should invoke :func:`do_POST_other()` If the :func:`do__` method does not exist we'll fall back to original behavior. :returns: ``True`` for success, ``False`` for failure; on failure, any relevant error response has already been sent back. """ ret = BaseHTTPRequestHandler.parse_request(self) if ret: urlpath = urlparse(self.path) self.path = urlpath.path self.path_query = parse_qs(urlpath.query) or {} mname = self.path.lstrip('/').split('/')[0] mname = self.command + ('_' + mname if mname else '') if hasattr(self, 'do_' + mname): self.command = mname return ret def query(self, sql: str, *params: Any, retry: bool = False) -> List[Tuple[Any, ...]]: """Execute *sql* query with *params* and optionally return results. :param sql: the SQL statement to be run. :param params: positional arguments to call :func:`RestApiServer.query` with. :param retry: whether the query should be retried upon failure or given up immediately. :returns: a list of rows that were fetched from the database. """ if not retry: return self.server.query(sql, *params) return Retry(delay=1, retry_exceptions=PostgresConnectionException)(self.server.query, sql, *params) def get_postgresql_status(self, retry: bool = False) -> Dict[str, Any]: """Builds an object representing a status of "postgres". Some of the values are collected by executing a query and other are taken from the state stored in memory. :param retry: whether the query should be retried if failed or give up immediately :returns: a dict with the status of Postgres/Patroni. 
The keys are: * ``state``: Postgres state among ``stopping``, ``stopped``, ``stop failed``, ``crashed``, ``running``, ``starting``, ``start failed``, ``restarting``, ``restart failed``, ``initializing new cluster``, ``initdb failed``, ``running custom bootstrap script``, ``custom bootstrap failed``, ``creating replica``, or ``unknown``; * ``postmaster_start_time``: ``pg_postmaster_start_time()``; * ``role``: ``replica`` or ``master`` based on ``pg_is_in_recovery()`` output; * ``server_version``: Postgres version without periods, e.g. ``150002`` for Postgres ``15.2``; * ``xlog``: dictionary. Its structure depends on ``role``: * If ``master``: * ``location``: ``pg_current_wal_flush_lsn()`` * If ``replica``: * ``received_location``: ``pg_wal_lsn_diff(pg_last_wal_receive_lsn(), '0/0')``; * ``replayed_location``: ``pg_wal_lsn_diff(pg_last_wal_replay_lsn(), '0/0)``; * ``replayed_timestamp``: ``pg_last_xact_replay_timestamp``; * ``paused``: ``pg_is_wal_replay_paused()``; * ``sync_standby``: ``True`` if replication mode is synchronous and this is a sync standby; * ``timeline``: PostgreSQL primary node timeline; * ``replication``: :class:`list` of :class:`dict` entries, one for each replication connection. Each entry contains the following keys: * ``application_name``: ``pg_stat_activity.application_name``; * ``client_addr``: ``pg_stat_activity.client_addr``; * ``state``: ``pg_stat_replication.state``; * ``sync_priority``: ``pg_stat_replication.sync_priority``; * ``sync_state``: ``pg_stat_replication.sync_state``; * ``usename``: ``pg_stat_activity.usename``. * ``pause``: ``True`` if cluster is in maintenance mode; * ``cluster_unlocked``: ``True`` if cluster has no node holding the leader lock; * ``failsafe_mode_is_active``: ``True`` if DCS failsafe mode is currently active; * ``dcs_last_seen``: epoch timestamp DCS was last reached by Patroni. 
""" postgresql = self.server.patroni.postgresql cluster = self.server.patroni.dcs.cluster global_config = self.server.patroni.config.get_global_config(cluster) try: if postgresql.state not in ('running', 'restarting', 'starting'): raise RetryFailedError('') replication_state = ('(pg_catalog.pg_stat_get_wal_receiver()).status' if postgresql.major_version >= 90600 else 'NULL') + ", " +\ ("pg_catalog.current_setting('restore_command')" if postgresql.major_version >= 120000 else "NULL") stmt = ("SELECT " + postgresql.POSTMASTER_START_TIME + ", " + postgresql.TL_LSN + "," " pg_catalog.pg_last_xact_replay_timestamp(), " + replication_state + "," " pg_catalog.array_to_json(pg_catalog.array_agg(pg_catalog.row_to_json(ri))) " "FROM (SELECT (SELECT rolname FROM pg_catalog.pg_authid WHERE oid = usesysid) AS usename," " application_name, client_addr, w.state, sync_state, sync_priority" " FROM pg_catalog.pg_stat_get_wal_senders() w, pg_catalog.pg_stat_get_activity(pid)) AS ri") row = self.query(stmt.format(postgresql.wal_name, postgresql.lsn_name, postgresql.wal_flush), retry=retry)[0] result = { 'state': postgresql.state, 'postmaster_start_time': row[0], 'role': 'replica' if row[1] == 0 else 'master', 'server_version': postgresql.server_version, 'xlog': ({ 'received_location': row[4] or row[3], 'replayed_location': row[3], 'replayed_timestamp': row[6], 'paused': row[5]} if row[1] == 0 else { 'location': row[2] }) } if result['role'] == 'replica' and global_config.is_standby_cluster: result['role'] = postgresql.role if result['role'] == 'replica' and global_config.is_synchronous_mode\ and cluster and cluster.sync.matches(postgresql.name): result['sync_standby'] = True if row[1] > 0: result['timeline'] = row[1] else: leader_timeline = None\ if not cluster or cluster.is_unlocked() or not cluster.leader else cluster.leader.timeline result['timeline'] = postgresql.replica_cached_timeline(leader_timeline) replication_state = postgresql.replication_state_from_parameters(row[1] > 0, 
row[7], row[8]) if replication_state: result['replication_state'] = replication_state if row[9]: result['replication'] = row[9] except (psycopg.Error, RetryFailedError, PostgresConnectionException): state = postgresql.state if state == 'running': logger.exception('get_postgresql_status') state = 'unknown' result: Dict[str, Any] = {'state': state, 'role': postgresql.role} if global_config.is_paused: result['pause'] = True if not cluster or cluster.is_unlocked(): result['cluster_unlocked'] = True if self.server.patroni.ha.failsafe_is_active(): result['failsafe_mode_is_active'] = True result['dcs_last_seen'] = self.server.patroni.dcs.last_seen return result def handle_one_request(self) -> None: """Parse and dispatch a request to the appropriate ``do_*`` method. .. note:: This is only used to keep track of latency when logging messages through :func:`log_message`. """ self.__start_time = time.time() BaseHTTPRequestHandler.handle_one_request(self) def log_message(self, format: str, *args: Any) -> None: """Log a custom ``debug`` message. Additionally, to *format*, the log entry contains the client IP address and the current latency of the request. :param format: printf-style format string message to be logged. :param args: arguments to be applied as inputs to *format*. """ latency = 1000.0 * (time.time() - self.__start_time) logger.debug("API thread: %s - - %s latency: %0.3f ms", self.client_address[0], format % args, latency) class RestApiServer(ThreadingMixIn, HTTPServer, Thread): """Patroni REST API server. An asynchronous thread-based HTTP server. """ # On 3.7+ the `ThreadingMixIn` gathers all non-daemon worker threads in order to join on them at server close. daemon_threads = True # Make worker threads "fire and forget" to prevent a memory leak. def __init__(self, patroni: Patroni, config: Dict[str, Any]) -> None: """Establish patroni configuration for the REST API daemon. Create a :class:`RestApiServer` instance. :param patroni: Patroni daemon process. 
:param config: ``restapi`` section of Patroni configuration. """ self.connection_string: str self.__auth_key = None self.__allowlist_include_members: Optional[bool] = None self.__allowlist: Tuple[Union[IPv4Network, IPv6Network], ...] = () self.http_extra_headers: Dict[str, str] = {} self.patroni = patroni self.__listen = None self.request_queue_size = int(config.get('request_queue_size', 5)) self.__ssl_options: Dict[str, Any] = {} self.__ssl_serial_number = None self._received_new_cert = False self.reload_config(config) self.daemon = True def query(self, sql: str, *params: Any) -> List[Tuple[Any, ...]]: """Execute *sql* query with *params* and optionally return results. .. note:: Prefer to use own connection to postgres and fallback to ``heartbeat`` when own isn't available. :param sql: the SQL statement to be run. :param params: positional arguments to be used as parameters for *sql*. :returns: a list of rows that were fetched from the database. :raises: :class:`psycopg.Error`: if had issues while executing *sql*. :class:`~patroni.exceptions.PostgresConnectionException`: if had issues while connecting to the database. """ # We first try to get a heartbeat connection because it is always required for the main thread. try: heartbeat_connection = self.patroni.postgresql.connection_pool.get('heartbeat') heartbeat_connection.get() # try to open psycopg connection to postgres except psycopg.Error as exc: raise PostgresConnectionException('connection problems') from exc try: connection = self.patroni.postgresql.connection_pool.get('restapi') connection.get() # try to open psycopg connection to postgres except psycopg.Error: logger.debug('restapi connection to postgres is not available') connection = heartbeat_connection return connection.query(sql, *params) @staticmethod def _set_fd_cloexec(fd: socket.socket) -> None: """Set ``FD_CLOEXEC`` for *fd*. It is used to avoid inheriting the REST API port when forking its process. .. 
note:: Only takes effect on non-Windows environments. :param fd: socket file descriptor. """ if os.name != 'nt': import fcntl flags = fcntl.fcntl(fd, fcntl.F_GETFD) fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) def check_basic_auth_key(self, key: str) -> bool: """Check if *key* matches the password configured for the REST API. :param key: the password received through the Basic authorization header of an HTTP request. :returns: ``True`` if *key* matches the password configured for the REST API. """ # pyright -- ``__auth_key`` was already checked through the caller method (:func:`check_auth_header`). if TYPE_CHECKING: # pragma: no cover assert self.__auth_key is not None return hmac.compare_digest(self.__auth_key, key.encode('utf-8')) def check_auth_header(self, auth_header: Optional[str]) -> Optional[str]: """Validate HTTP Basic authorization header, if present. :param auth_header: value of ``Authorization`` HTTP header, if present, else ``None``. :returns: an error message if any issue is found, ``None`` otherwise. """ if self.__auth_key: if auth_header is None: return 'no auth header received' if not auth_header.startswith('Basic ') or not self.check_basic_auth_key(auth_header[6:]): return 'not authenticated' @staticmethod def __resolve_ips(host: str, port: int) -> Iterator[Union[IPv4Network, IPv6Network]]: """Resolve *host* + *port* to one or more IP networks. :param host: hostname to be checked. :param port: port to be checked. :yields: *host* + *port* resolved to IP networks. """ try: for _, _, _, _, sa in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP): yield ip_network(sa[0], False) except Exception as e: logger.error('Failed to resolve %s: %r', host, e) def __members_ips(self) -> Iterator[Union[IPv4Network, IPv6Network]]: """Resolve each Patroni node ``restapi.connect_address`` to IP networks. .. note:: Only yields object if ``restapi.allowlist_include_members`` setting is enabled. 
:yields: each node ``restapi.connect_address`` resolved to an IP network. """ cluster = self.patroni.dcs.cluster if self.__allowlist_include_members and cluster: for cluster in [cluster] + list(cluster.workers.values()): for member in cluster.members: if member.api_url: try: r = urlparse(member.api_url) if r.hostname: port = r.port or (443 if r.scheme == 'https' else 80) for ip in self.__resolve_ips(r.hostname, port): yield ip except Exception as e: logger.debug('Failed to parse url %s: %r', member.api_url, e) def check_access(self, rh: RestApiHandler) -> Optional[bool]: """Ensure client has enough privileges to perform a given request. Write a response back to the client if any issue is observed, and the HTTP status may be: * ``401``: if ``Authorization`` header is missing or contain an invalid password; * ``403``: if: * ``restapi.allowlist`` was configured, but client IP is not in the allowed list; or * ``restapi.allowlist_include_members`` is enabled, but client IP is not in the members list; or * a client certificate is expected by the server, but is missing in the request. :param rh: the request which access should be checked. :returns: ``True`` if client access verification succeeded, otherwise ``None``. 
""" if self.__allowlist or self.__allowlist_include_members: incoming_ip = ip_address(rh.client_address[0]) if not any(incoming_ip in net for net in self.__allowlist + tuple(self.__members_ips())): return rh.write_response(403, 'Access is denied') if not hasattr(rh.request, 'getpeercert') or not rh.request.getpeercert(): # valid client cert isn't present if self.__protocol == 'https' and self.__ssl_options.get('verify_client') in ('required', 'optional'): return rh.write_response(403, 'client certificate required') reason = self.check_auth_header(rh.headers.get('Authorization')) if reason: headers = {'WWW-Authenticate': 'Basic realm="' + self.patroni.__class__.__name__ + '"'} return rh.write_response(401, reason, headers=headers) return True @staticmethod def __has_dual_stack() -> bool: """Check if the system has support for dual stack sockets. :returns: ``True`` if it has support for dual stack sockets. """ if hasattr(socket, 'AF_INET6') and hasattr(socket, 'IPPROTO_IPV6') and hasattr(socket, 'IPV6_V6ONLY'): sock = None try: sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, False) return True except socket.error as e: logger.debug('Error when working with ipv6 socket: %s', e) finally: if sock: sock.close() return False def __httpserver_init(self, host: str, port: int) -> None: """Start REST API HTTP server. .. note:: If system has no support for dual stack sockets, then IPv4 is preferred over IPv6. :param host: host to bind REST API to. :param port: port to bind REST API to. 
""" dual_stack = self.__has_dual_stack() hostname = host if hostname in ('', '*'): hostname = None info = socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE) # in case dual stack is not supported we want IPv4 to be preferred over IPv6 info.sort(key=lambda x: x[0] == socket.AF_INET, reverse=not dual_stack) self.address_family = info[0][0] try: HTTPServer.__init__(self, info[0][-1][:2], RestApiHandler) except socket.error: logger.error( "Couldn't start a service on '%s:%s', please check your `restapi.listen` configuration", hostname, port) raise def __initialize(self, listen: str, ssl_options: Dict[str, Any]) -> None: """Configure and start REST API HTTP server. .. note:: This method can be called upon first initialization, and also when reloading Patroni. When reloading Patroni, it restarts the HTTP server thread. :param listen: IP and port to bind REST API to. It should be a string in the format ``host:port``, where ``host`` can be a hostname or IP address. It is the value of ``restapi.listen`` setting. :param ssl_options: dictionary that may contain the following keys, depending on what has been configured in ``restapi`` section: * ``certfile``: path to PEM certificate. If given, will start in HTTPS mode; * ``keyfile``: path to key of ``certfile``; * ``keyfile_password``: password for decrypting ``keyfile``; * ``cafile``: path to CA file to validate client certificates; * ``ciphers``: permitted cipher suites; * ``verify_client``: value can be one among: * ``none``: do not check client certificates; * ``optional``: check client certificate only for unsafe REST API endpoints; * ``required``: check client certificate for all REST API endpoints. :raises: :class:`ValueError`: if any issue is faced while parsing *listen*. 
""" try: host, port = split_host_port(listen, None) except Exception: raise ValueError('Invalid "restapi" config: expected : for "listen", but got "{0}"' .format(listen)) reloading_config = self.__listen is not None # changing config in runtime if reloading_config: self.shutdown() # Rely on ThreadingMixIn.server_close() to have all requests terminate before we continue self.server_close() self.__listen = listen self.__ssl_options = ssl_options self._received_new_cert = False # reset to False after reload_config() self.__httpserver_init(host, port) Thread.__init__(self, target=self.serve_forever) self._set_fd_cloexec(self.socket) # wrap socket with ssl if 'certfile' is defined in a config.yaml # Sometime it's also needed to pass reference to a 'keyfile'. self.__protocol = 'https' if ssl_options.get('certfile') else 'http' if self.__protocol == 'https': import ssl ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=ssl_options.get('cafile')) if ssl_options.get('ciphers'): ctx.set_ciphers(ssl_options['ciphers']) ctx.load_cert_chain(certfile=ssl_options['certfile'], keyfile=ssl_options.get('keyfile'), password=ssl_options.get('keyfile_password')) verify_client = ssl_options.get('verify_client') if verify_client: modes = {'none': ssl.CERT_NONE, 'optional': ssl.CERT_OPTIONAL, 'required': ssl.CERT_REQUIRED} if verify_client in modes: ctx.verify_mode = modes[verify_client] else: logger.error('Bad value in the "restapi.verify_client": %s', verify_client) self.__ssl_serial_number = self.get_certificate_serial_number() self.socket = ctx.wrap_socket(self.socket, server_side=True, do_handshake_on_connect=False) if reloading_config: self.start() def process_request_thread(self, request: Union[socket.socket, Tuple[bytes, socket.socket]], client_address: Tuple[str, int]) -> None: """Process a request to the REST API. 
Wrapper for :func:`~socketserver.ThreadingMixIn.process_request_thread` that additionally: * Enable TCP keepalive * Perform SSL handshake (if an SSL socket). :param request: socket to handle the client request. :param client_address: tuple containing the client IP and port. """ if isinstance(request, socket.socket): enable_keepalive(request, 10, 3) if hasattr(request, 'context'): # SSLSocket from ssl import SSLSocket if isinstance(request, SSLSocket): # pyright request.do_handshake() super(RestApiServer, self).process_request_thread(request, client_address) def shutdown_request(self, request: Union[socket.socket, Tuple[bytes, socket.socket]]) -> None: """Shut down a request to the REST API. Wrapper for :func:`http.server.HTTPServer.shutdown_request` that additionally: * Perform SSL shutdown handshake (if a SSL socket). :param request: socket to handle the client request. """ if hasattr(request, 'context'): # SSLSocket try: from ssl import SSLSocket if isinstance(request, SSLSocket): # pyright request.unwrap() except Exception as e: logger.debug('Failed to shutdown SSL connection: %r', e) super(RestApiServer, self).shutdown_request(request) def get_certificate_serial_number(self) -> Optional[str]: """Get serial number of the certificate used by the REST API. :returns: serial number of the certificate configured through ``restapi.certfile`` setting. """ if self.__ssl_options.get('certfile'): import ssl try: crt: Dict[str, Any] = ssl._ssl._test_decode_cert(self.__ssl_options['certfile']) # pyright: ignore if TYPE_CHECKING: # pragma: no cover assert isinstance(crt, dict) return crt.get('serialNumber') except ssl.SSLError as e: logger.error('Failed to get serial number from certificate %s: %r', self.__ssl_options['certfile'], e) def reload_local_certificate(self) -> Optional[bool]: """Reload the SSL certificate used by the REST API. :return: ``True`` if a different certificate has been configured through ``restapi.certfile` setting, ``None`` otherwise. 
""" if self.__protocol == 'https': on_disk_cert_serial_number = self.get_certificate_serial_number() if on_disk_cert_serial_number != self.__ssl_serial_number: self._received_new_cert = True self.__ssl_serial_number = on_disk_cert_serial_number return True def _build_allowlist(self, value: Optional[List[str]]) -> Iterator[Union[IPv4Network, IPv6Network]]: """Resolve each entry in *value* to an IP network object. :param value: list of IPs and/or networks contained in ``restapi.allowlist`` setting. Each item can be a host, an IP, or a network in CIDR format. :yields: *host* + *port* resolved to IP networks. """ if isinstance(value, list): for v in value: if '/' in v: # netmask try: yield ip_network(v, False) except Exception as e: logger.error('Invalid value "%s" in the allowlist: %r', v, e) else: # ip or hostname, try to resolve it for ip in self.__resolve_ips(v, 8080): yield ip def reload_config(self, config: Dict[str, Any]) -> None: """Reload REST API configuration. :param config: dictionary representing values under the ``restapi`` configuration section. :raises: :class:`ValueError`: if ``listen`` key is not present in *config*. 
""" if 'listen' not in config: # changing config in runtime raise ValueError('Can not find "restapi.listen" config') self.__allowlist = tuple(self._build_allowlist(config.get('allowlist'))) self.__allowlist_include_members = config.get('allowlist_include_members') ssl_options = {n: config[n] for n in ('certfile', 'keyfile', 'keyfile_password', 'cafile', 'ciphers') if n in config} self.http_extra_headers = config.get('http_extra_headers') or {} self.http_extra_headers.update((config.get('https_extra_headers') or {}) if ssl_options.get('certfile') else {}) if isinstance(config.get('verify_client'), str): ssl_options['verify_client'] = config['verify_client'].lower() if self.__listen != config['listen'] or self.__ssl_options != ssl_options or self._received_new_cert: self.__initialize(config['listen'], ssl_options) self.__auth_key = base64.b64encode(config['auth'].encode('utf-8')) if 'auth' in config else None # pyright -- ``__listen`` is initially created as ``None``, but right after that it is replaced with a string # through :func:`__initialize`. if TYPE_CHECKING: # pragma: no cover assert isinstance(self.__listen, str) self.connection_string = uri(self.__protocol, config.get('connect_address') or self.__listen, 'patroni') def handle_error(self, request: Union[socket.socket, Tuple[bytes, socket.socket]], client_address: Tuple[str, int]) -> None: """Handle any exception that is thrown while handling a request to the REST API. Logs ``WARNING`` messages with the client information, and the stack trace of the faced exception. :param request: the request that faced an exception. :param client_address: a tuple composed of the IP and port of the client connection. 
""" logger.warning('Exception happened during processing of request from %s:%s', client_address[0], client_address[1]) logger.warning(traceback.format_exc()) patroni-3.2.2/patroni/async_executor.py000066400000000000000000000215431455170150700202650ustar00rootroot00000000000000"""Implement facilities for executing asynchronous tasks.""" import logging from threading import Event, Lock, RLock, Thread from types import TracebackType from typing import Any, Callable, Optional, Tuple, Type from .postgresql.cancellable import CancellableSubprocess logger = logging.getLogger(__name__) class CriticalTask(object): """Represents a critical task in a background process that we either need to cancel or get the result of. Fields of this object may be accessed only when holding a lock on it. To perform the critical task the background thread must, while holding lock on this object, check ``is_cancelled`` flag, run the task and mark the task as complete using :func:`complete`. The main thread must hold async lock to prevent the task from completing, hold lock on critical task object, call :func:`cancel`. If the task has completed :func:`cancel` will return ``False`` and ``result`` field will contain the result of the task. When :func:`cancel` returns ``True`` it is guaranteed that the background task will notice the ``is_cancelled`` flag. :ivar is_cancelled: if the critical task has been cancelled. :ivar result: contains the result of the task, if it has already been completed. """ def __init__(self) -> None: """Create a new instance of :class:`CriticalTask`. Instantiate the lock and the task control attributes. """ self._lock = Lock() self.is_cancelled = False self.result = None def reset(self) -> None: """Must be called every time the background task is finished. .. note:: Must be called from async thread. Caller must hold lock on async executor when calling. """ self.is_cancelled = False self.result = None def cancel(self) -> bool: """Tries to cancel the task. .. 
note:: Caller must hold lock on async executor and the task when calling. :returns: ``False`` if the task has already run, or ``True`` it has been cancelled. """ if self.result is not None: return False self.is_cancelled = True return True def complete(self, result: Any) -> None: """Mark task as completed along with a *result*. .. note:: Must be called from async thread. Caller must hold lock on task when calling. """ self.result = result def __enter__(self) -> 'CriticalTask': """Acquire the object lock when entering the context manager.""" self._lock.acquire() return self def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> None: """Release the object lock when exiting the context manager.""" self._lock.release() class AsyncExecutor(object): """Asynchronous executor of (long) tasks. :ivar critical_task: a :class:`CriticalTask` instance to handle execution of critical background tasks. """ def __init__(self, cancellable: CancellableSubprocess, ha_wakeup: Callable[..., None]) -> None: """Create a new instance of :class:`AsyncExecutor`. Configure the given *cancellable* and *ha_wakeup*, initializes the control attributes, and instantiate the lock and event objects that are used to access attributes and manage communication between threads. :param cancellable: a subprocess that supports being cancelled. :param ha_wakeup: function to wake up the HA loop. """ self._cancellable = cancellable self._ha_wakeup = ha_wakeup self._thread_lock = RLock() self._scheduled_action: Optional[str] = None self._scheduled_action_lock = RLock() self._is_cancelled = False self._finish_event = Event() self.critical_task = CriticalTask() @property def busy(self) -> bool: """``True`` if there is an action scheduled to occur, else ``False``.""" return self.scheduled_action is not None def schedule(self, action: str) -> Optional[str]: """Schedule *action* to be executed. .. 
note:: Must be called before executing a task. .. note:: *action* can only be scheduled if there is no other action currently scheduled. :param action: action to be executed. :returns: ``None`` if *action* has been successfully scheduled, or the previously scheduled action, if any. """ with self._scheduled_action_lock: if self._scheduled_action is not None: return self._scheduled_action self._scheduled_action = action self._is_cancelled = False self._finish_event.set() return None @property def scheduled_action(self) -> Optional[str]: """The currently scheduled action, if any, else ``None``.""" with self._scheduled_action_lock: return self._scheduled_action def reset_scheduled_action(self) -> None: """Unschedule a previously scheduled action, if any. .. note:: Must be called once the scheduled task finishes or is cancelled. """ with self._scheduled_action_lock: self._scheduled_action = None def run(self, func: Callable[..., Any], args: Tuple[Any, ...] = ()) -> Optional[Any]: """Run *func* with *args*. .. note:: Expected to be executed through a thread. :param func: function to be run. If it returns anything other than ``None``, HA loop will be woken up at the end of :func:`run` execution. :param args: arguments to be passed to *func*. :returns: ``None`` if *func* execution has been cancelled or faced any exception, otherwise the result of *func*. """ wakeup = False try: with self: if self._is_cancelled: return self._finish_event.clear() self._cancellable.reset_is_cancelled() # if the func returned something (not None) - wake up main HA loop wakeup = func(*args) if args else func() return wakeup except Exception: logger.exception('Exception during execution of long running task %s', self.scheduled_action) finally: with self: self.reset_scheduled_action() self._finish_event.set() with self.critical_task: self.critical_task.reset() if wakeup is not None: self._ha_wakeup() def run_async(self, func: Callable[..., Any], args: Tuple[Any, ...] 
= ()) -> None: """Start an async thread that runs *func* with *args*. :param func: function to be run. Will be passed through args to :class:`~threading.Thread` with a target of :func:`run`. :param args: arguments to be passed along to :class:`~threading.Thread` with *func*. """ Thread(target=self.run, args=(func, args)).start() def try_run_async(self, action: str, func: Callable[..., Any], args: Tuple[Any, ...] = ()) -> Optional[str]: """Try to run an async task, if none is currently being executed. :param action: name of the task to be executed. :param func: actual function that performs the task *action*. :param args: arguments to be passed to *func*. :returns: ``None`` if *func* was scheduled successfully, otherwise an error message informing of an already ongoing task. """ prev = self.schedule(action) if prev is None: return self.run_async(func, args) return 'Failed to run {0}, {1} is already in progress'.format(action, prev) def cancel(self) -> None: """Request cancellation of a scheduled async task, if any. .. note:: Wait until task is cancelled before returning control to caller. """ with self: with self._scheduled_action_lock: if self._scheduled_action is None: return logger.warning('Cancelling long running task %s', self._scheduled_action) self._is_cancelled = True self._cancellable.cancel() self._finish_event.wait() with self: self.reset_scheduled_action() def __enter__(self) -> 'AsyncExecutor': """Acquire the thread lock when entering the context manager.""" self._thread_lock.acquire() return self def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> None: """Release the thread lock when exiting the context manager. .. note:: The arguments are not used, but we need them to match the expected method signature. 
""" self._thread_lock.release() patroni-3.2.2/patroni/collections.py000066400000000000000000000157061455170150700175540ustar00rootroot00000000000000"""Patroni custom object types somewhat like :mod:`collections` module. Provides a case insensitive :class:`dict` and :class:`set` object types. """ from collections import OrderedDict from typing import Any, Collection, Dict, Iterator, KeysView, MutableMapping, MutableSet, Optional class CaseInsensitiveSet(MutableSet[str]): """A case-insensitive :class:`set`-like object. Implements all methods and operations of :class:`~typing.MutableSet`. All values are expected to be strings. The structure remembers the case of the last value set, however, contains testing is case insensitive. """ def __init__(self, values: Optional[Collection[str]] = None) -> None: """Create a new instance of :class:`CaseInsensitiveSet` with the given *values*. :param values: values to be added to the set. """ self._values: Dict[str, str] = {} for v in values or (): self.add(v) def __repr__(self) -> str: """Get a string representation of the set. Provide a helpful way of recreating the set. :returns: representation of the set, showing its values. :Example: >>> repr(CaseInsensitiveSet(('1', 'test', 'Test', 'TESt', 'test2'))) # doctest: +ELLIPSIS "'.format(type(self).__name__, tuple(self._values.values()), id(self)) def __str__(self) -> str: """Get set values for printing. :returns: set of values in string format. :Example: >>> str(CaseInsensitiveSet(('1', 'test', 'Test', 'TESt', 'test2'))) # doctest: +SKIP "{'TESt', 'test2', '1'}" """ return str(set(self._values.values())) def __contains__(self, value: str) -> bool: """Check if set contains *value*. The check is performed case-insensitively. :param value: value to be checked. :returns: ``True`` if *value* is already in the set, ``False`` otherwise. """ return value.lower() in self._values def __iter__(self) -> Iterator[str]: """Iterate over the values in this set. :yields: values from set. 
""" return iter(self._values.values()) def __len__(self) -> int: """Get the length of this set. :returns: number of values in the set. :Example: >>> len(CaseInsensitiveSet(('1', 'test', 'Test', 'TESt', 'test2'))) 3 """ return len(self._values) def add(self, value: str) -> None: """Add *value* to this set. Search is performed case-insensitively. If *value* is already in the set, overwrite it with *value*, so we "remember" the last case of *value*. :param value: value to be added to the set. """ self._values[value.lower()] = value def discard(self, value: str) -> None: """Remove *value* from this set. Search is performed case-insensitively. If *value* is not present in the set, no exception is raised. :param value: value to be removed from the set. """ self._values.pop(value.lower(), None) def issubset(self, other: 'CaseInsensitiveSet') -> bool: """Check if this set is a subset of *other*. :param other: another set to be compared with this set. :returns: ``True`` if this set is a subset of *other*, else ``False``. """ return self <= other class CaseInsensitiveDict(MutableMapping[str, Any]): """A case-insensitive :class:`dict`-like object. Implements all methods and operations of :class:`~typing.MutableMapping` as well as :class:`dict`'s :func:`~dict.copy`. All keys are expected to be strings. The structure remembers the case of the last key to be set, and :func:`iter`, :func:`dict.keys`, :func:`dict.items`, :func:`dict.iterkeys`, and :func:`dict.iteritems` will contain case-sensitive keys. However, querying and contains testing is case insensitive. """ def __init__(self, data: Optional[Dict[str, Any]] = None) -> None: """Create a new instance of :class:`CaseInsensitiveDict` with the given *data*. :param data: initial dictionary to create a :class:`CaseInsensitiveDict` from. """ self._values: OrderedDict[str, Any] = OrderedDict() self.update(data or {}) def __setitem__(self, key: str, value: Any) -> None: """Assign *value* to *key* in this dict. 
*key* is searched/stored case-insensitively in the dict. The corresponding value in the dict is a tuple of: * original *key*; * *value*. :param key: key to be created or updated in the dict. :param value: value for *key*. """ self._values[key.lower()] = (key, value) def __getitem__(self, key: str) -> Any: """Get the value corresponding to *key*. *key* is searched case-insensitively in the dict. .. note: If *key* is not present in the dict, :class:`KeyError` will be triggered. :param key: key to be searched in the dict. :returns: value corresponding to *key*. """ return self._values[key.lower()][1] def __delitem__(self, key: str) -> None: """Remove *key* from this dict. *key* is searched case-insensitively in the dict. .. note: If *key* is not present in the dict, :class:`KeyError` will be triggered. :param key: key to be removed from the dict. """ del self._values[key.lower()] def __iter__(self) -> Iterator[str]: """Iterate over keys of this dict. :yields: each key present in the dict. Yields each key with its last case that has been stored. """ return iter(key for key, _ in self._values.values()) def __len__(self) -> int: """Get the length of this dict. :returns: number of keys in the dict. :Example: >>> len(CaseInsensitiveDict({'a': 'b', 'A': 'B', 'c': 'd'})) 2 """ return len(self._values) def copy(self) -> 'CaseInsensitiveDict': """Create a copy of this dict. :return: a new dict object with the same keys and values of this dict. """ return CaseInsensitiveDict({v[0]: v[1] for v in self._values.values()}) def keys(self) -> KeysView[str]: """Return a new view of the dict's keys. :returns: a set-like object providing a view on the dict's keys """ return self._values.keys() def __repr__(self) -> str: """Get a string representation of the dict. Provide a helpful way of recreating the dict. :returns: representation of the dict, showing its keys and values. 
:Example: >>> repr(CaseInsensitiveDict({'a': 'b', 'A': 'B', 'c': 'd'})) # doctest: +ELLIPSIS "'.format(type(self).__name__, dict(self.items()), id(self)) patroni-3.2.2/patroni/config.py000066400000000000000000001310151455170150700164730ustar00rootroot00000000000000"""Facilities related to Patroni configuration.""" import json import logging import os import shutil import tempfile import yaml from collections import defaultdict from copy import deepcopy from typing import Any, Callable, Collection, Dict, List, Optional, Union, TYPE_CHECKING from . import PATRONI_ENV_PREFIX from .collections import CaseInsensitiveDict from .dcs import ClusterConfig, Cluster from .exceptions import ConfigParseError from .file_perm import pg_perm from .postgresql.config import ConfigHandler from .validator import IntValidator from .utils import deep_compare, parse_bool, parse_int, patch_config logger = logging.getLogger(__name__) _AUTH_ALLOWED_PARAMETERS = ( 'username', 'password', 'sslmode', 'sslcert', 'sslkey', 'sslpassword', 'sslrootcert', 'sslcrl', 'sslcrldir', 'gssencmode', 'channel_binding' ) def default_validator(conf: Dict[str, Any]) -> List[str]: """Ensure *conf* is not empty. Designed to be used as default validator for :class:`Config` objects, if no specific validator is provided. :param conf: configuration to be validated. :returns: an empty list -- :class:`Config` expects the validator to return a list of 0 or more issues found while validating the configuration. :raises: :class:`ConfigParseError`: if *conf* is empty. """ if not conf: raise ConfigParseError("Config is empty.") return [] class GlobalConfig(object): """A class that wraps global configuration and provides convenient methods to access/check values. 
It is instantiated either by calling :func:`get_global_config` or :meth:`Config.get_global_config`, which picks either a configuration from provided :class:`Cluster` object (the most up-to-date) or from the local cache if :class:`ClusterConfig` is not initialized or doesn't have a valid config. """ def __init__(self, config: Dict[str, Any]) -> None: """Initialize :class:`GlobalConfig` object with given *config*. :param config: current configuration either from :class:`ClusterConfig` or from :func:`Config.dynamic_configuration`. """ self.__config = config def get(self, name: str) -> Any: """Gets global configuration value by *name*. :param name: parameter name. :returns: configuration value or ``None`` if it is missing. """ return self.__config.get(name) def check_mode(self, mode: str) -> bool: """Checks whether the certain parameter is enabled. :param mode: parameter name, e.g. ``synchronous_mode``, ``failsafe_mode``, ``pause``, ``check_timeline``, and so on. :returns: ``True`` if parameter *mode* is enabled in the global configuration. """ return bool(parse_bool(self.__config.get(mode))) @property def is_paused(self) -> bool: """``True`` if cluster is in maintenance mode.""" return self.check_mode('pause') @property def is_synchronous_mode(self) -> bool: """``True`` if synchronous replication is requested and it is not a standby cluster config.""" return self.check_mode('synchronous_mode') and not self.is_standby_cluster @property def is_synchronous_mode_strict(self) -> bool: """``True`` if at least one synchronous node is required.""" return self.check_mode('synchronous_mode_strict') def get_standby_cluster_config(self) -> Union[Dict[str, Any], Any]: """Get ``standby_cluster`` configuration. :returns: a copy of ``standby_cluster`` configuration. 
""" return deepcopy(self.get('standby_cluster')) @property def is_standby_cluster(self) -> bool: """``True`` if global configuration has a valid ``standby_cluster`` section.""" config = self.get_standby_cluster_config() return isinstance(config, dict) and\ bool(config.get('host') or config.get('port') or config.get('restore_command')) def get_int(self, name: str, default: int = 0) -> int: """Gets current value of *name* from the global configuration and try to return it as :class:`int`. :param name: name of the parameter. :param default: default value if *name* is not in the configuration or invalid. :returns: currently configured value of *name* from the global configuration or *default* if it is not set or invalid. """ ret = parse_int(self.get(name)) return default if ret is None else ret @property def min_synchronous_nodes(self) -> int: """The minimal number of synchronous nodes based on whether ``synchronous_mode_strict`` is enabled or not.""" return 1 if self.is_synchronous_mode_strict else 0 @property def synchronous_node_count(self) -> int: """Currently configured value of ``synchronous_node_count`` from the global configuration. Assume ``1`` if it is not set or invalid. """ return max(self.get_int('synchronous_node_count', 1), self.min_synchronous_nodes) @property def maximum_lag_on_failover(self) -> int: """Currently configured value of ``maximum_lag_on_failover`` from the global configuration. Assume ``1048576`` if it is not set or invalid. """ return self.get_int('maximum_lag_on_failover', 1048576) @property def maximum_lag_on_syncnode(self) -> int: """Currently configured value of ``maximum_lag_on_syncnode`` from the global configuration. Assume ``-1`` if it is not set or invalid. """ return self.get_int('maximum_lag_on_syncnode', -1) @property def primary_start_timeout(self) -> int: """Currently configured value of ``primary_start_timeout`` from the global configuration. Assume ``300`` if it is not set or invalid. .. 
def get_global_config(cluster: Optional[Cluster], default: Optional[Dict[str, Any]] = None) -> GlobalConfig:
    """Instantiate :class:`GlobalConfig` based on the input.

    :param cluster: the currently known cluster state from DCS.
    :param default: default configuration, which will be used if there is no valid *cluster.config*.

    :returns: :class:`GlobalConfig` object.
    """
    # Guard against an accidentally wiped DCS: only trust the cluster config
    # once its modify_version shows it has actually been written.
    have_dcs_config = bool(cluster and cluster.config and cluster.config.modify_version)
    source = cluster.config.data if have_dcs_config else (default or {})
    return GlobalConfig(deepcopy(source))
    # Name of the environment variable Patroni configuration may be passed through.
    PATRONI_CONFIG_VARIABLE = PATRONI_ENV_PREFIX + 'CONFIGURATION'

    # File used to cache dynamic configuration under the Postgres data directory.
    __CACHE_FILENAME = 'patroni.dynamic.json'
    # Sane default values for some Patroni settings; merged under dynamic configuration.
    __DEFAULT_CONFIG: Dict[str, Any] = {
        'ttl': 30, 'loop_wait': 10, 'retry_timeout': 10,
        'standby_cluster': {
            'create_replica_methods': '',
            'host': '',
            'port': '',
            'primary_slot_name': '',
            'restore_command': '',
            'archive_cleanup_command': '',
            'recovery_min_apply_delay': ''
        },
        'postgresql': {
            'use_slots': True,
            # Defaults for GUCs Patroni passes on the command line, except the
            # version-dependent wal_keep_* pair, which is resolved at runtime.
            'parameters': CaseInsensitiveDict({p: v[0] for p, v in ConfigHandler.CMDLINE_OPTIONS.items()
                                               if v[0] is not None and p not in ('wal_keep_segments',
                                                                                 'wal_keep_size')})
        }
    }

    def __init__(self, configfile: str,
                 validator: Optional[Callable[[Dict[str, Any]], List[str]]] = default_validator) -> None:
        """Create a new instance of :class:`Config` and validate the loaded configuration using *validator*.

        .. note::
            Patroni will read configuration from these locations in this order:

            * file or directory path passed as command-line argument (*configfile*), if it exists
              and the file or files found in the directory can be parsed (see
              :meth:`~Config._load_config_path`), otherwise
            * YAML file passed via the environment variable (see :attr:`PATRONI_CONFIG_VARIABLE`),
              if the referenced file exists and can be parsed, otherwise
            * from configuration values defined as environment variables, see
              :meth:`~Config._build_environment_configuration`.

        :param configfile: path to Patroni configuration file.
        :param validator: function used to validate Patroni configuration. It should receive a dictionary
            which represents Patroni configuration, and return a list of zero or more error messages
            based on validation.

        :raises:
            :class:`ConfigParseError`: if any issue is reported by *validator*.
        """
        self._modify_version = -1
        self._dynamic_configuration = {}

        # Environment variables are read (and popped) exactly once, up front.
        self.__environment_configuration = self._build_environment_configuration()

        # Config file takes precedence; fall back to the env-provided YAML blob, then to plain env vars.
        self._config_file = configfile if configfile and os.path.exists(configfile) else None
        if self._config_file:
            self._local_configuration = self._load_config_file()
        else:
            config_env = os.environ.pop(self.PATRONI_CONFIG_VARIABLE, None)
            self._local_configuration = config_env and yaml.safe_load(config_env) or self.__environment_configuration
        if validator:
            errors = validator(self._local_configuration)
            if errors:
                raise ConfigParseError("\n".join(errors))
        self.__effective_configuration = self._build_effective_configuration({}, self._local_configuration)
        self._data_dir = self.__effective_configuration.get('postgresql', {}).get('data_dir', "")
        self._cache_file = os.path.join(self._data_dir, self.__CACHE_FILENAME)
        if validator:  # patronictl uses validator=None
            self._load_cache()  # we don't want to load anything from local cache for ctl
            self._validate_failover_tags()  # irrelevant for ctl
        self._cache_needs_saving = False
:returns: copy of :attr:`~Config.__DEFAULT_CONFIG` """ return deepcopy(cls.__DEFAULT_CONFIG) def _load_config_path(self, path: str) -> Dict[str, Any]: """Load Patroni configuration file(s) from *path*. If *path* is a file, load the yml file pointed to by *path*. If *path* is a directory, load all yml files in that directory in alphabetical order. :param path: path to either an YAML configuration file, or to a folder containing YAML configuration files. :returns: configuration after reading the configuration file(s) from *path*. :raises: :class:`ConfigParseError`: if *path* is invalid. """ if os.path.isfile(path): files = [path] elif os.path.isdir(path): files = [os.path.join(path, f) for f in sorted(os.listdir(path)) if (f.endswith('.yml') or f.endswith('.yaml')) and os.path.isfile(os.path.join(path, f))] else: logger.error('config path %s is neither directory nor file', path) raise ConfigParseError('invalid config path') overall_config: Dict[str, Any] = {} for fname in files: with open(fname) as f: config = yaml.safe_load(f) patch_config(overall_config, config) return overall_config def _load_config_file(self) -> Dict[str, Any]: """Load configuration file(s) from filesystem and apply values which were set via environment variables. :returns: final configuration after merging configuration file(s) and environment variables. """ if TYPE_CHECKING: # pragma: no cover assert self.config_file is not None config = self._load_config_path(self.config_file) patch_config(config, self.__environment_configuration) return config def _load_cache(self) -> None: """Load dynamic configuration from ``patroni.dynamic.json``.""" if os.path.isfile(self._cache_file): try: with open(self._cache_file) as f: self.set_dynamic_configuration(json.load(f)) except Exception: logger.exception('Exception when loading file: %s', self._cache_file) def save_cache(self) -> None: """Save dynamic configuration to ``patroni.dynamic.json`` under Postgres data directory. .. 
note:: ``patroni.dynamic.jsonXXXXXX`` is created as a temporary file and than renamed to ``patroni.dynamic.json``, where ``XXXXXX`` is a random suffix. """ if self._cache_needs_saving: tmpfile = fd = None try: pg_perm.set_permissions_from_data_directory(self._data_dir) (fd, tmpfile) = tempfile.mkstemp(prefix=self.__CACHE_FILENAME, dir=self._data_dir) with os.fdopen(fd, 'w') as f: fd = None json.dump(self.dynamic_configuration, f) tmpfile = shutil.move(tmpfile, self._cache_file) os.chmod(self._cache_file, pg_perm.file_create_mode) self._cache_needs_saving = False except Exception: logger.exception('Exception when saving file: %s', self._cache_file) if fd: try: os.close(fd) except Exception: logger.error('Can not close temporary file %s', tmpfile) if tmpfile and os.path.exists(tmpfile): try: os.remove(tmpfile) except Exception: logger.error('Can not remove temporary file %s', tmpfile) def __get_and_maybe_adjust_int_value(self, config: Dict[str, Any], param: str, min_value: int) -> int: """Get, validate and maybe adjust a *param* integer value from the *config* :class:`dict`. .. note: If the value is smaller than provided *min_value* we update the *config*. This method may raise an exception if value isn't :class:`int` or cannot be casted to :class:`int`. :param config: :class:`dict` object with new global configuration. :param param: name of the configuration parameter we want to read/validate/adjust. :param min_value: the minimum possible value that a given *param* could have. :returns: an integer value which corresponds to a provided *param*. """ value = int(config.get(param, self.__DEFAULT_CONFIG[param])) if value < min_value: logger.warning("%s=%d can't be smaller than %d, adjusting...", param, value, min_value) value = config[param] = min_value return value def _validate_and_adjust_timeouts(self, config: Dict[str, Any]) -> None: """Validate and adjust ``loop_wait``, ``retry_timeout``, and ``ttl`` values if necessary. 
    def _validate_and_adjust_timeouts(self, config: Dict[str, Any]) -> None:
        """Validate and adjust ``loop_wait``, ``retry_timeout``, and ``ttl`` values if necessary.

        Minimum values:

            * ``loop_wait``: 1 second;
            * ``retry_timeout``: 3 seconds.
            * ``ttl``: 20 seconds;

        Maximum values:
            In case if values don't fulfill the following rule, ``retry_timeout`` and ``loop_wait``
            are reduced so that the rule is fulfilled:

            .. code-block:: python

                loop_wait + 2 * retry_timeout <= ttl

            .. note:
                We prefer to reduce ``loop_wait`` and will reduce ``retry_timeout`` only if
                ``loop_wait`` is already set to a minimal possible value.

        :param config: :class:`dict` object with new global configuration.
        """
        min_loop_wait = 1
        # Each call clamps the value to its minimum and writes it back into *config* if adjusted.
        loop_wait = self.__get_and_maybe_adjust_int_value(config, 'loop_wait', min_loop_wait)
        retry_timeout = self.__get_and_maybe_adjust_int_value(config, 'retry_timeout', 3)
        ttl = self.__get_and_maybe_adjust_int_value(config, 'ttl', 20)

        # Even the minimal loop_wait doesn't fit: shrink retry_timeout as well.
        if min_loop_wait + 2 * retry_timeout > ttl:
            config['loop_wait'] = min_loop_wait
            config['retry_timeout'] = (ttl - min_loop_wait) // 2
            logger.warning('Violated the rule "loop_wait + 2*retry_timeout <= ttl", where ttl=%d. '
                           'Adjusting loop_wait from %d to %d and retry_timeout from %d to %d',
                           ttl, loop_wait, min_loop_wait, retry_timeout, config['retry_timeout'])
        # Only loop_wait needs to be reduced to satisfy the rule.
        elif loop_wait + 2 * retry_timeout > ttl:
            config['loop_wait'] = ttl - 2 * retry_timeout
            logger.warning('Violated the rule "loop_wait + 2*retry_timeout <= ttl", where ttl=%d and retry_timeout=%d.'
                           ' Adjusting loop_wait from %d to %d', ttl, retry_timeout, loop_wait, config['loop_wait'])
""" if isinstance(configuration, ClusterConfig): if self._modify_version == configuration.modify_version: return False # If the version didn't change there is nothing to do self._modify_version = configuration.modify_version configuration = configuration.data if not deep_compare(self._dynamic_configuration, configuration): try: self._validate_and_adjust_timeouts(configuration) self.__effective_configuration = self._build_effective_configuration(configuration, self._local_configuration) self._dynamic_configuration = configuration self._cache_needs_saving = True return True except Exception: logger.exception('Exception when setting dynamic_configuration') return False def reload_local_configuration(self) -> Optional[bool]: """Reload configuration values from the configuration file(s). .. note:: Designed to be used when user applies changes to configuration file(s), so Patroni can use the new values with a reload instead of a restart. :returns: ``True`` if changes have been detected between current local configuration """ if self.config_file: try: configuration = self._load_config_file() if not deep_compare(self._local_configuration, configuration): new_configuration = self._build_effective_configuration(self._dynamic_configuration, configuration) self._local_configuration = configuration self.__effective_configuration = new_configuration self._validate_failover_tags() return True else: logger.info('No local configuration items changed.') except Exception: logger.exception('Exception when reloading local configuration from %s', self.config_file) @staticmethod def _process_postgresql_parameters(parameters: Dict[str, Any], is_local: bool = False) -> Dict[str, Any]: """Process Postgres *parameters*. .. note:: If *is_local* configuration discard any setting from *parameters* that is listed under :attr:`~patroni.postgresql.config.ConfigHandler.CMDLINE_OPTIONS` as those are supposed to be set only through dynamic configuration. 
When setting parameters from :attr:`~patroni.postgresql.config.ConfigHandler.CMDLINE_OPTIONS` through dynamic configuration their value will be validated as per the validator defined in that very same attribute entry. If the given value cannot be validated, a warning will be logged and the default value of the GUC will be used instead. Some parameters from :attr:`~patroni.postgresql.config.ConfigHandler.CMDLINE_OPTIONS` cannot be set even if not *is_local* configuration: * ``listen_addresses``: inferred from ``postgresql.listen`` local configuration or from ``PATRONI_POSTGRESQL_LISTEN`` environment variable; * ``port``: inferred from ``postgresql.listen`` local configuration or from ``PATRONI_POSTGRESQL_LISTEN`` environment variable; * ``cluster_name``: set through ``scope`` local configuration or through ``PATRONI_SCOPE`` environment variable; * ``hot_standby``: always enabled; * ``wal_log_hints``: always enabled. :param parameters: Postgres parameters to be processed. Should be the parsed YAML value of ``postgresql.parameters`` configuration, either from local or from dynamic configuration. :param is_local: should be ``True`` if *parameters* refers to local configuration, or ``False`` if *parameters* refers to dynamic configuration. :returns: new value for ``postgresql.parameters`` after processing and validating *parameters*. 
""" pg_params: Dict[str, Any] = {} for name, value in (parameters or {}).items(): if name not in ConfigHandler.CMDLINE_OPTIONS: pg_params[name] = value elif not is_local: validator = ConfigHandler.CMDLINE_OPTIONS[name][1] if validator(value): int_val = parse_int(value) if isinstance(validator, IntValidator) else None pg_params[name] = int_val if isinstance(int_val, int) else value else: logger.warning("postgresql parameter %s=%s failed validation, defaulting to %s", name, value, ConfigHandler.CMDLINE_OPTIONS[name][0]) return pg_params def _safe_copy_dynamic_configuration(self, dynamic_configuration: Dict[str, Any]) -> Dict[str, Any]: """Create a copy of *dynamic_configuration*. Merge *dynamic_configuration* with :attr:`__DEFAULT_CONFIG` (*dynamic_configuration* takes precedence), and process ``postgresql.parameters`` from *dynamic_configuration* through :func:`_process_postgresql_parameters`, if present. .. note:: The following settings are not allowed in ``postgresql`` section as they are intended to be local configuration, and are removed if present: * ``connect_address``; * ``proxy_address``; * ``listen``; * ``config_dir``; * ``data_dir``; * ``pgpass``; * ``authentication``; Besides that any setting present in *dynamic_configuration* but absent from :attr:`__DEFAULT_CONFIG` is discarded. :param dynamic_configuration: Patroni dynamic configuration. :returns: copy of *dynamic_configuration*, merged with default dynamic configuration and with some sanity checks performed over it. 
""" config = self.get_default_config() for name, value in dynamic_configuration.items(): if name == 'postgresql': for name, value in (value or {}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value)) elif name not in ('connect_address', 'proxy_address', 'listen', 'config_dir', 'data_dir', 'pgpass', 'authentication'): config['postgresql'][name] = deepcopy(value) elif name == 'standby_cluster': for name, value in (value or {}).items(): if name in self.__DEFAULT_CONFIG['standby_cluster']: config['standby_cluster'][name] = deepcopy(value) elif name in config: # only variables present in __DEFAULT_CONFIG allowed to be overridden from DCS config[name] = int(value) return config @staticmethod def _build_environment_configuration() -> Dict[str, Any]: """Get local configuration settings that were specified through environment variables. :returns: dictionary containing the found environment variables and their values, respecting the expected structure of Patroni configuration. """ ret: Dict[str, Any] = defaultdict(dict) def _popenv(name: str) -> Optional[str]: """Get value of environment variable *name*. .. note:: *name* is prefixed with :data:`~patroni.PATRONI_ENV_PREFIX` when searching in the environment. Also, the corresponding environment variable is removed from the environment upon reading its value. :param name: name of the environment variable. :returns: value of *name*, if present in the environment, otherwise ``None``. """ return os.environ.pop(PATRONI_ENV_PREFIX + name.upper(), None) for param in ('name', 'namespace', 'scope'): value = _popenv(param) if value: ret[param] = value def _fix_log_env(name: str, oldname: str) -> None: """Normalize a log related environment variable. .. note:: Patroni used to support different names for log related environment variables in the past. As the environment variables were renamed, this function takes care of mapping and normalizing the environment. 
    @staticmethod
    def _build_environment_configuration() -> Dict[str, Any]:
        """Get local configuration settings that were specified through environment variables.

        .. note::
            Every recognized environment variable is *popped* from :data:`os.environ` while being read.

        :returns: dictionary containing the found environment variables and their values, respecting the
            expected structure of Patroni configuration.
        """
        ret: Dict[str, Any] = defaultdict(dict)

        def _popenv(name: str) -> Optional[str]:
            """Get value of environment variable *name*.

            .. note::
                *name* is prefixed with :data:`~patroni.PATRONI_ENV_PREFIX` when searching in the
                environment. Also, the corresponding environment variable is removed from the
                environment upon reading its value.

            :param name: name of the environment variable.

            :returns: value of *name*, if present in the environment, otherwise ``None``.
            """
            return os.environ.pop(PATRONI_ENV_PREFIX + name.upper(), None)

        # Top-level scalar settings.
        for param in ('name', 'namespace', 'scope'):
            value = _popenv(param)
            if value:
                ret[param] = value

        def _fix_log_env(name: str, oldname: str) -> None:
            """Normalize a log related environment variable.

            .. note::
                Patroni used to support different names for log related environment variables in the
                past. As the environment variables were renamed, this function takes care of mapping
                and normalizing the environment. *name* is prefixed with
                :data:`~patroni.PATRONI_ENV_PREFIX` and ``LOG`` when searching in the environment.
                *oldname* is prefixed with :data:`~patroni.PATRONI_ENV_PREFIX` when searching in the
                environment. If both *name* and *oldname* are set in the environment, *name* takes
                precedence.

            :param name: new name of a log related environment variable.
            :param oldname: original name of a log related environment variable.
            """
            value = _popenv(oldname)
            name = PATRONI_ENV_PREFIX + 'LOG_' + name.upper()
            if value and name not in os.environ:
                os.environ[name] = value

        for name, oldname in (('level', 'loglevel'), ('format', 'logformat'), ('dateformat', 'log_datefmt')):
            _fix_log_env(name, oldname)

        def _set_section_values(section: str, params: List[str]) -> None:
            """Get value of *params* environment variables that are related with *section*.

            .. note::
                The values are retrieved from the environment and updated directly into the returning
                dictionary of :func:`_build_environment_configuration`.

            :param section: configuration section the *params* belong to.
            :param params: name of the Patroni settings.
            """
            for param in params:
                value = _popenv(section + '_' + param)
                if value:
                    ret[section][param] = value

        _set_section_values('restapi', ['listen', 'connect_address', 'certfile', 'keyfile', 'keyfile_password',
                                        'cafile', 'ciphers', 'verify_client', 'http_extra_headers',
                                        'https_extra_headers', 'allowlist', 'allowlist_include_members',
                                        'request_queue_size'])
        _set_section_values('ctl', ['insecure', 'cacert', 'certfile', 'keyfile', 'keyfile_password'])
        _set_section_values('postgresql', ['listen', 'connect_address', 'proxy_address',
                                           'config_dir', 'data_dir', 'pgpass', 'bin_dir'])
        _set_section_values('log', ['level', 'traceback_level', 'format', 'dateformat', 'max_queue_size',
                                    'dir', 'file_size', 'file_num', 'loggers'])
        _set_section_values('raft', ['data_dir', 'self_addr', 'partner_addrs', 'password', 'bind_addr'])

        # Custom names for PostgreSQL binaries, e.g. PATRONI_POSTGRESQL_BIN_PG_CTL.
        for binary in ('pg_ctl', 'initdb', 'pg_controldata', 'pg_basebackup', 'postgres', 'pg_isready', 'pg_rewind'):
            value = _popenv('POSTGRESQL_BIN_' + binary)
            if value:
                ret['postgresql'].setdefault('bin_name', {})[binary] = value

        # parse all values retrieved from the environment as Python objects, according to the expected type
        for first, second in (('restapi', 'allowlist_include_members'), ('ctl', 'insecure')):
            value = ret.get(first, {}).pop(second, None)
            if value:
                value = parse_bool(value)
                if value is not None:
                    ret[first][second] = value

        for first, params in (('restapi', ('request_queue_size',)),
                              ('log', ('max_queue_size', 'file_size', 'file_num'))):
            for second in params:
                value = ret.get(first, {}).pop(second, None)
                if value:
                    value = parse_int(value)
                    if value is not None:
                        ret[first][second] = value

        def _parse_list(value: str) -> Optional[List[str]]:
            """Parse an YAML list *value* as a :class:`list`.

            :param value: YAML list as a string.

            :returns: *value* as :class:`list`, or ``None`` if it could not be parsed.
            """
            if not (value.strip().startswith('-') or '[' in value):
                value = '[{0}]'.format(value)
            try:
                return yaml.safe_load(value)
            except Exception:
                logger.exception('Exception when parsing list %s', value)
                return None

        for first, second in (('raft', 'partner_addrs'), ('restapi', 'allowlist')):
            value = ret.get(first, {}).pop(second, None)
            if value:
                value = _parse_list(value)
                if value:
                    ret[first][second] = value

        def _parse_dict(value: str) -> Optional[Dict[str, Any]]:
            """Parse an YAML dictionary *value* as a :class:`dict`.

            :param value: YAML dictionary as a string.

            :returns: *value* as :class:`dict`, or ``None`` if it could not be parsed.
            """
            if not value.strip().startswith('{'):
                value = '{{{0}}}'.format(value)
            try:
                return yaml.safe_load(value)
            except Exception:
                logger.exception('Exception when parsing dict %s', value)
                return None

        for first, params in (('restapi', ('http_extra_headers', 'https_extra_headers')), ('log', ('loggers',))):
            for second in params:
                value = ret.get(first, {}).pop(second, None)
                if value:
                    value = _parse_dict(value)
                    if value:
                        ret[first][second] = value

        def _get_auth(name: str, params: Collection[str] = _AUTH_ALLOWED_PARAMETERS[:2]) -> Dict[str, str]:
            """Get authorization related environment variables *params* from section *name*.

            :param name: name of a configuration section that may contain authorization *params*.
            :param params: the authorization settings that may be set under section *name*.
                Defaults to ``username`` and ``password`` only.

            :returns: dictionary containing environment values for authorization *params* of section *name*.
            """
            ret: Dict[str, str] = {}
            for param in params:
                value = _popenv(name + '_' + param)
                if value:
                    ret[param] = value
            return ret

        for section in ('ctl', 'restapi'):
            auth = _get_auth(section)
            if auth:
                ret[section]['authentication'] = auth

        authentication = {}
        for user_type in ('replication', 'superuser', 'rewind'):
            entry = _get_auth(user_type, _AUTH_ALLOWED_PARAMETERS)
            if entry:
                authentication[user_type] = entry

        if authentication:
            ret['postgresql']['authentication'] = authentication

        for param in list(os.environ.keys()):
            if param.startswith(PATRONI_ENV_PREFIX):
                # PATRONI_(ETCD|CONSUL|ZOOKEEPER|EXHIBITOR|...)_(HOSTS?|PORT|..)
                name, suffix = (param[len(PATRONI_ENV_PREFIX):].split('_', 1) + [''])[:2]
                if suffix in ('HOST', 'HOSTS', 'PORT', 'USE_PROXIES', 'PROTOCOL', 'SRV', 'SRV_SUFFIX', 'URL',
                              'PROXY', 'CACERT', 'CERT', 'KEY', 'VERIFY', 'TOKEN', 'CHECKS', 'DC', 'CONSISTENCY',
                              'REGISTER_SERVICE', 'SERVICE_CHECK_INTERVAL', 'SERVICE_CHECK_TLS_SERVER_NAME',
                              'SERVICE_TAGS', 'NAMESPACE', 'CONTEXT', 'USE_ENDPOINTS', 'SCOPE_LABEL', 'ROLE_LABEL',
                              'POD_IP', 'PORTS', 'LABELS', 'BYPASS_API_SERVICE', 'RETRIABLE_HTTP_CODES',
                              'KEY_PASSWORD', 'USE_SSL', 'SET_ACLS', 'GROUP', 'DATABASE', 'LEADER_LABEL_VALUE',
                              'FOLLOWER_LABEL_VALUE', 'STANDBY_LEADER_LABEL_VALUE', 'TMP_ROLE_LABEL') and name:
                    value = os.environ.pop(param)
                    if name == 'CITUS':
                        # Only GROUP (as int) and DATABASE are meaningful for the citus section.
                        if suffix == 'GROUP':
                            value = parse_int(value)
                        elif suffix != 'DATABASE':
                            continue
                    elif suffix == 'PORT':
                        value = value and parse_int(value)
                    elif suffix in ('HOSTS', 'PORTS', 'CHECKS', 'SERVICE_TAGS', 'RETRIABLE_HTTP_CODES'):
                        value = value and _parse_list(value)
                    elif suffix in ('LABELS', 'SET_ACLS'):
                        value = _parse_dict(value)
                    elif suffix in ('USE_PROXIES', 'REGISTER_SERVICE', 'USE_ENDPOINTS', 'BYPASS_API_SERVICE',
                                    'VERIFY'):
                        value = parse_bool(value)
                    if value is not None:
                        ret[name.lower()][suffix.lower()] = value

        # etcd/etcd3 additionally accept username/password for authentication.
        for dcs in ('etcd', 'etcd3'):
            if dcs in ret:
                ret[dcs].update(_get_auth(dcs))

        users = {}
        for param in list(os.environ.keys()):
            if param.startswith(PATRONI_ENV_PREFIX):
                name, suffix = (param[len(PATRONI_ENV_PREFIX):].rsplit('_', 1) + [''])[:2]
                # PATRONI_{username}_PASSWORD=... and PATRONI_{username}_OPTIONS=... translate into
                # bootstrap users: CREATE USER "{username}" WITH {options} PASSWORD '{password}'
                if name and suffix == 'PASSWORD':
                    password = os.environ.pop(param)
                    if password:
                        users[name] = {'password': password}
                        options = os.environ.pop(param[:-9] + '_OPTIONS', None)  # replace "_PASSWORD" with "_OPTIONS"
                        options = options and _parse_list(options)
                        if options:
                            users[name]['options'] = options
        if users:
            ret['bootstrap']['users'] = users

        return ret
    def _build_effective_configuration(self, dynamic_configuration: Dict[str, Any],
                                       local_configuration: Dict[str, Union[Dict[str, Any], Any]]) -> Dict[str, Any]:
        """Build effective configuration by merging *dynamic_configuration* and *local_configuration*.

        .. note::
            *local_configuration* takes precedence over *dynamic_configuration* if a setting is
            defined in both.

        :param dynamic_configuration: Patroni dynamic configuration.
        :param local_configuration: Patroni local configuration.

        :returns: the effective configuration dictionary built by merging *dynamic_configuration* and
            *local_configuration*, with backward-compatibility adjustments applied.
        """
        config = self._safe_copy_dynamic_configuration(dynamic_configuration)
        for name, value in local_configuration.items():
            if name == 'citus':  # remove invalid citus configuration
                if isinstance(value, dict) and isinstance(value.get('group'), int) \
                        and isinstance(value.get('database'), str):
                    config[name] = value
            elif name == 'postgresql':
                for name, value in (value or {}).items():
                    if name == 'parameters':
                        config['postgresql'][name].update(self._process_postgresql_parameters(value, True))
                    elif name != 'use_slots':  # replication slots must be enabled/disabled globally
                        config['postgresql'][name] = deepcopy(value)
            elif name not in config or name in ['watchdog']:
                config[name] = deepcopy(value) if value else {}

        # restapi server expects to get restapi.auth = 'username:password' and similarly for `ctl`
        for section in ('ctl', 'restapi'):
            if section in config and 'authentication' in config[section]:
                config[section]['auth'] = '{username}:{password}'.format(**config[section]['authentication'])

        # special treatment for old config

        # 'exhibitor' inside 'zookeeper':
        if 'zookeeper' in config and 'exhibitor' in config['zookeeper']:
            config['exhibitor'] = config['zookeeper'].pop('exhibitor')
            config.pop('zookeeper')

        pg_config = config['postgresql']
        # no 'authentication' in 'postgresql', but 'replication' and 'superuser'
        if 'authentication' not in pg_config:
            pg_config['use_pg_rewind'] = 'pg_rewind' in pg_config
            pg_config['authentication'] = {u: pg_config[u] for u in ('replication', 'superuser') if u in pg_config}
        # no 'superuser' in 'postgresql'.'authentication'
        if 'superuser' not in pg_config['authentication'] and 'pg_rewind' in pg_config:
            pg_config['authentication']['superuser'] = pg_config['pg_rewind']

        # handle setting additional connection parameters that may be available
        # in the configuration file, such as SSL connection parameters
        for name, value in pg_config['authentication'].items():
            pg_config['authentication'][name] = {n: v for n, v in value.items() if n in _AUTH_ALLOWED_PARAMETERS}

        # no 'name' in config
        if 'name' not in config and 'name' in pg_config:
            config['name'] = pg_config['name']

        # when bootstrapping the new Citus cluster (coordinator/worker) enable sync replication
        # in global configuration
        if 'citus' in config:
            bootstrap = config.setdefault('bootstrap', {})
            dcs = bootstrap.setdefault('dcs', {})
            dcs.setdefault('synchronous_mode', True)

        # Mirror a few top-level settings into the postgresql section, where other modules expect them.
        updated_fields = (
            'name',
            'scope',
            'retry_timeout',
            'citus'
        )

        pg_config.update({p: config[p] for p in updated_fields if p in config})

        return config
""" return deepcopy(self.__effective_configuration) def get_global_config(self, cluster: Optional[Cluster]) -> GlobalConfig: """Instantiate :class:`GlobalConfig` based on input. Use the configuration from provided *cluster* (the most up-to-date) or from the local cache if *cluster.config* is not initialized or doesn't have a valid config. :param cluster: the currently known cluster state from DCS. :returns: :class:`GlobalConfig` object. """ return get_global_config(cluster, self._dynamic_configuration) def _validate_failover_tags(self) -> None: """Check ``nofailover``/``failover_priority`` config and warn user if it's contradictory. .. note:: To preserve sanity (and backwards compatibility) the ``nofailover`` tag will still exist. A contradictory configuration is one where ``nofailover`` is ``True`` but ``failover_priority > 0``, or where ``nofailover`` is ``False``, but ``failover_priority <= 0``. Essentially, ``nofailover`` and ``failover_priority`` are communicating different things. This checks for this edge case (which is a misconfiguration on the part of the user) and warns them. The behaviour is as if ``failover_priority`` were not provided (i.e ``nofailover`` is the bedrock source of truth) """ tags = self.get('tags', {}) if 'nofailover' not in tags: return nofailover_tag = tags.get('nofailover') failover_priority_tag = parse_int(tags.get('failover_priority')) if failover_priority_tag is not None \ and (bool(nofailover_tag) is True and failover_priority_tag > 0 or bool(nofailover_tag) is False and failover_priority_tag <= 0): logger.warning('Conflicting configuration between nofailover: %s and failover_priority: %s. 
' 'Defaulting to nofailover: %s', nofailover_tag, failover_priority_tag, nofailover_tag) patroni-3.2.2/patroni/config_generator.py000066400000000000000000000557341455170150700205560ustar00rootroot00000000000000"""patroni ``--generate-config`` machinery.""" import abc import logging import os import psutil import socket import sys import yaml from getpass import getuser, getpass from contextlib import contextmanager from typing import Any, Dict, Iterator, List, Optional, TextIO, Tuple, TYPE_CHECKING, Union if TYPE_CHECKING: # pragma: no cover from psycopg import Cursor from psycopg2 import cursor from . import psycopg from .config import Config from .exceptions import PatroniException from .log import PatroniLogger from .postgresql.config import ConfigHandler, parse_dsn from .postgresql.misc import postgres_major_version_to_int from .utils import get_major_version, parse_bool, patch_config, read_stripped # Mapping between the libpq connection parameters and the environment variables. # This dict should be kept in sync with `patroni.utils._AUTH_ALLOWED_PARAMETERS` # (we use "username" in the Patroni config for some reason, other parameter names are the same). _AUTH_ALLOWED_PARAMETERS_MAPPING = { 'user': 'PGUSER', 'password': 'PGPASSWORD', 'sslmode': 'PGSSLMODE', 'sslcert': 'PGSSLCERT', 'sslkey': 'PGSSLKEY', 'sslpassword': '', 'sslrootcert': 'PGSSLROOTCERT', 'sslcrl': 'PGSSLCRL', 'sslcrldir': 'PGSSLCRLDIR', 'gssencmode': 'PGGSSENCMODE', 'channel_binding': 'PGCHANNELBINDING' } NO_VALUE_MSG = '#FIXME' def get_address() -> Tuple[str, str]: """Try to get hostname and the ip address for it returned by :func:`~socket.gethostname`. .. note:: Can also return local ip. :returns: tuple consisting of the hostname returned by :func:`~socket.gethostname` and the first element in the sorted list of the addresses returned by :func:`~socket.getaddrinfo`. Sorting guarantees it will prefer IPv4. 
If an exception occured, hostname and ip values are equal to :data:`~patroni.config_generator.NO_VALUE_MSG`. """ hostname = None try: hostname = socket.gethostname() return hostname, sorted(socket.getaddrinfo(hostname, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0), key=lambda x: x[0])[0][4][0] except Exception as err: logging.warning('Failed to obtain address: %r', err) return NO_VALUE_MSG, NO_VALUE_MSG class AbstractConfigGenerator(abc.ABC): """Object representing the generated Patroni config. :ivar output_file: full path to the output file to be used. :ivar pg_major: integer representation of the major PostgreSQL version. :ivar config: dictionary used for the generated configuration storage. """ _HOSTNAME, _IP = get_address() def __init__(self, output_file: Optional[str]) -> None: """Set up the output file (if passed), helper vars and the minimal config structure. :param output_file: full path to the output file to be used. """ self.output_file = output_file self.pg_major = 0 self.config = self.get_template_config() self.generate() @classmethod def get_template_config(cls) -> Dict[str, Any]: """Generate a template config for further extension (e.g. in the inherited classes). :returns: dictionary with the values gathered from Patroni env, hopefully defined hostname and ip address (otherwise set to :data:`~patroni.config_generator.NO_VALUE_MSG`), and some sane defaults. 
""" template_config: Dict[str, Any] = { 'scope': NO_VALUE_MSG, 'name': cls._HOSTNAME, 'restapi': { 'connect_address': cls._IP + ':8008', 'listen': cls._IP + ':8008' }, 'log': { 'level': PatroniLogger.DEFAULT_LEVEL, 'traceback_level': PatroniLogger.DEFAULT_TRACEBACK_LEVEL, 'format': PatroniLogger.DEFAULT_FORMAT, 'max_queue_size': PatroniLogger.DEFAULT_MAX_QUEUE_SIZE }, 'postgresql': { 'data_dir': NO_VALUE_MSG, 'connect_address': cls._IP + ':5432', 'listen': cls._IP + ':5432', 'bin_dir': '', 'authentication': { 'superuser': { 'username': 'postgres', 'password': NO_VALUE_MSG }, 'replication': { 'username': 'replicator', 'password': NO_VALUE_MSG } } }, 'tags': { 'failover_priority': 1, 'noloadbalance': False, 'clonefrom': True, 'nosync': False, } } dynamic_config = Config.get_default_config() # to properly dump CaseInsensitiveDict as YAML later dynamic_config['postgresql']['parameters'] = dict(dynamic_config['postgresql']['parameters']) config = Config('', None).local_configuration # Get values from env config.setdefault('bootstrap', {})['dcs'] = dynamic_config config.setdefault('postgresql', {}) del config['bootstrap']['dcs']['standby_cluster'] patch_config(template_config, config) return template_config @abc.abstractmethod def generate(self) -> None: """Generate config and store in :attr:`~AbstractConfigGenerator.config`.""" @staticmethod def _format_block(block: Any, line_prefix: str = '') -> str: """Format a single YAML block. .. note:: Optionally the formatted block could be indented with the *line_prefix* :param block: the object that should be formatted to YAML. :param line_prefix: is used for indentation. :returns: a formatted and indented *block*. 
""" return line_prefix + yaml.safe_dump(block, default_flow_style=False, line_break='\n', allow_unicode=True, indent=2).strip().replace('\n', '\n' + line_prefix) def _format_config_section(self, section_name: str) -> Iterator[str]: """Format and yield as single section of the current :attr:`~AbstractConfigGenerator.config`. .. note:: If the section is a :class:`dict` object we put an empty line before it. :param section_name: a section name in the :attr:`~AbstractConfigGenerator.config`. :yields: a formatted section in case if it exists in the :attr:`~AbstractConfigGenerator.config`. """ if section_name in self.config: if isinstance(self.config[section_name], dict): yield '' yield self._format_block({section_name: self.config[section_name]}) def _format_config(self) -> Iterator[str]: """Format current :attr:`~AbstractConfigGenerator.config` and enrich it with some comments. :yields: formatted lines or blocks that represent a text output of the YAML document. """ for name in ('scope', 'namespace', 'name', 'log', 'restapi', 'ctl', 'citus', 'consul', 'etcd', 'etcd3', 'exhibitor', 'kubernetes', 'raft', 'zookeeper'): yield from self._format_config_section(name) if 'bootstrap' in self.config: yield '\n# The bootstrap configuration. Works only when the cluster is not yet initialized.' yield '# If the cluster is already initialized, all changes in the `bootstrap` section are ignored!' yield 'bootstrap:' if 'dcs' in self.config['bootstrap']: yield ' # This section will be written into :///config after initializing' yield ' # new cluster and all other cluster members will use it as a `global configuration`.' yield ' # WARNING! If you want to change any of the parameters that were set up' yield ' # via `bootstrap.dcs` section, please use `patronictl edit-config`!' 
yield ' dcs:' for name in ('loop_wait', 'retry_timeout', 'ttl'): if name in self.config['bootstrap']['dcs']: yield self._format_block({name: self.config['bootstrap']['dcs'].pop(name)}, ' ') for name, value in self.config['bootstrap']['dcs'].items(): yield self._format_block({name: value}, ' ') for name in ('postgresql', 'watchdog', 'tags'): yield from self._format_config_section(name) def _write_config_to_fd(self, fd: TextIO) -> None: """Format and write current :attr:`~AbstractConfigGenerator.config` to provided file descriptor. :param fd: where to write the config file. Could be ``sys.stdout`` or the real file. """ fd.write('\n'.join(self._format_config())) def write_config(self) -> None: """Write current :attr:`~AbstractConfigGenerator.config` to the output file if provided, to stdout otherwise.""" if self.output_file: dir_path = os.path.dirname(self.output_file) if dir_path and not os.path.isdir(dir_path): os.makedirs(dir_path) with open(self.output_file, 'w', encoding='UTF-8') as output_file: self._write_config_to_fd(output_file) else: self._write_config_to_fd(sys.stdout) class SampleConfigGenerator(AbstractConfigGenerator): """Object representing the generated sample Patroni config. Sane defults are used based on the gathered PG version. """ @property def get_auth_method(self) -> str: """Return the preferred authentication method for a specific PG version if provided or the default ``md5``. :returns: :class:`str` value for the preferred authentication method. """ return 'scram-sha-256' if self.pg_major and self.pg_major >= 100000 else 'md5' def _get_int_major_version(self) -> int: """Get major PostgreSQL version from the binary as an integer. :returns: an integer PostgreSQL major version representation gathered from the PostgreSQL binary. See :func:`~patroni.postgresql.misc.postgres_major_version_to_int` and :func:`~patroni.utils.get_major_version`. 
""" postgres_bin = ((self.config.get('postgresql') or {}).get('bin_name') or {}).get('postgres', 'postgres') return postgres_major_version_to_int(get_major_version(self.config['postgresql'].get('bin_dir'), postgres_bin)) def generate(self) -> None: """Generate sample config using some sane defaults and update :attr:`~AbstractConfigGenerator.config`.""" self.pg_major = self._get_int_major_version() self.config['postgresql']['parameters'] = {'password_encryption': self.get_auth_method} username = self.config["postgresql"]["authentication"]["replication"]["username"] self.config['postgresql']['pg_hba'] = [ f'host all all all {self.get_auth_method}', f'host replication {username} all {self.get_auth_method}' ] # add version-specific configuration wal_keep_param = 'wal_keep_segments' if self.pg_major < 130000 else 'wal_keep_size' self.config['bootstrap']['dcs']['postgresql']['parameters'][wal_keep_param] = \ ConfigHandler.CMDLINE_OPTIONS[wal_keep_param][0] wal_level = 'hot_standby' if self.pg_major < 90600 else 'replica' self.config['bootstrap']['dcs']['postgresql']['parameters']['wal_level'] = wal_level self.config['bootstrap']['dcs']['postgresql']['use_pg_rewind'] = True if self.pg_major >= 110000: self.config['postgresql']['authentication'].setdefault( 'rewind', {'username': 'rewind_user'}).setdefault('password', NO_VALUE_MSG) class RunningClusterConfigGenerator(AbstractConfigGenerator): """Object representing the Patroni config generated using information gathered from the running instance. :ivar dsn: DSN string for the local instance to get GUC values from (if provided). :ivar parsed_dsn: DSN string parsed into a dictionary (see :func:`~patroni.postgresql.config.parse_dsn`). """ def __init__(self, output_file: Optional[str] = None, dsn: Optional[str] = None) -> None: """Additionally store the passed dsn (if any) in both original and parsed version and run config generation. :param output_file: full path to the output file to be used. 
:param dsn: DSN string for the local instance to get GUC values from. :raises: :exc:`~patroni.exceptions.PatroniException`: if DSN parsing failed. """ self.dsn = dsn self.parsed_dsn = {} super().__init__(output_file) @property def _get_hba_conn_types(self) -> Tuple[str, ...]: """Return the connection types allowed. If :attr:`~RunningClusterConfigGenerator.pg_major` is defined, adds additional parameters for PostgreSQL version >=16. :returns: tuple of the connection methods allowed. """ allowed_types = ('local', 'host', 'hostssl', 'hostnossl', 'hostgssenc', 'hostnogssenc') if self.pg_major and self.pg_major >= 160000: allowed_types += ('include', 'include_if_exists', 'include_dir') return allowed_types @property def _required_pg_params(self) -> List[str]: """PG configuration prameters that have to be always present in the generated config. :returns: list of the parameter names. """ return ['hba_file', 'ident_file', 'config_file', 'data_directory'] + \ list(ConfigHandler.CMDLINE_OPTIONS.keys()) def _get_bin_dir_from_running_instance(self) -> str: """Define the directory postgres binaries reside using postmaster's pid executable. :returns: path to the PostgreSQL binaries directory. :raises: :exc:`~patroni.exceptions.PatroniException`: if: * pid could not be obtained from the ``postmaster.pid`` file; or * :exc:`OSError` occured during ``postmaster.pid`` file handling; or * the obtained postmaster pid doesn't exist. 
""" postmaster_pid = None data_dir = self.config['postgresql']['data_dir'] try: with open(f"{data_dir}/postmaster.pid", 'r') as pid_file: postmaster_pid = pid_file.readline() if not postmaster_pid: raise PatroniException('Failed to obtain postmaster pid from postmaster.pid file') postmaster_pid = int(postmaster_pid.strip()) except OSError as err: raise PatroniException(f'Error while reading postmaster.pid file: {err}') try: return os.path.dirname(psutil.Process(postmaster_pid).exe()) except psutil.NoSuchProcess: raise PatroniException("Obtained postmaster pid doesn't exist.") @contextmanager def _get_connection_cursor(self) -> Iterator[Union['cursor', 'Cursor[Any]']]: """Get cursor for the PG connection established based on the stored information. :raises: :exc:`~patroni.exceptions.PatroniException`: if :exc:`psycopg.Error` occured. """ try: conn = psycopg.connect(dsn=self.dsn, password=self.config['postgresql']['authentication']['superuser']['password']) with conn.cursor() as cur: yield cur conn.close() except psycopg.Error as e: raise PatroniException(f'Failed to establish PostgreSQL connection: {e}') def _set_pg_params(self, cur: Union['cursor', 'Cursor[Any]']) -> None: """Extend :attr:`~RunningClusterConfigGenerator.config` with the actual PG GUCs values. THe following GUC values are set: * Non-internal having configuration file, postmaster command line or environment variable as a source. * List of the always required parameters (see :meth:`~RunningClusterConfigGenerator._required_pg_params`). :param cur: connection cursor to use. 
""" cur.execute("SELECT name, pg_catalog.current_setting(name) FROM pg_catalog.pg_settings " "WHERE context <> 'internal' " "AND source IN ('configuration file', 'command line', 'environment variable') " "AND category <> 'Write-Ahead Log / Recovery Target' " "AND setting <> '(disabled)' " "OR name = ANY(%s)", (self._required_pg_params,)) helper_dict = dict.fromkeys(['port', 'listen_addresses']) self.config['postgresql'].setdefault('parameters', {}) for param, value in cur.fetchall(): if param == 'data_directory': self.config['postgresql']['data_dir'] = value elif param == 'cluster_name' and value: self.config['scope'] = value elif param in ('archive_command', 'restore_command', 'archive_cleanup_command', 'recovery_end_command', 'ssl_passphrase_command', 'hba_file', 'ident_file', 'config_file'): # write commands to the local config due to security implications # write hba/ident/config_file to local config to ensure they are not removed later self.config['postgresql']['parameters'][param] = value elif param in helper_dict: helper_dict[param] = value else: self.config['bootstrap']['dcs']['postgresql']['parameters'][param] = value connect_port = self.parsed_dsn.get('port', os.getenv('PGPORT', helper_dict['port'])) self.config['postgresql']['connect_address'] = f'{self._IP}:{connect_port}' self.config['postgresql']['listen'] = f'{helper_dict["listen_addresses"]}:{helper_dict["port"]}' def _set_su_params(self) -> None: """Extend :attr:`~RunningClusterConfigGenerator.config` with the superuser auth information. Information set is based on the options used for connection. 
""" su_params: Dict[str, str] = {} for conn_param, env_var in _AUTH_ALLOWED_PARAMETERS_MAPPING.items(): val = self.parsed_dsn.get(conn_param, os.getenv(env_var)) if val: su_params[conn_param] = val patroni_env_su_username = ((self.config.get('authentication') or {}).get('superuser') or {}).get('username') patroni_env_su_pwd = ((self.config.get('authentication') or {}).get('superuser') or {}).get('password') # because we use "username" in the config for some reason su_params['username'] = su_params.pop('user', patroni_env_su_username) or getuser() su_params['password'] = su_params.get('password', patroni_env_su_pwd) or \ getpass('Please enter the user password:') self.config['postgresql']['authentication'] = { 'superuser': su_params, 'replication': {'username': NO_VALUE_MSG, 'password': NO_VALUE_MSG} } def _set_conf_files(self) -> None: """Extend :attr:`~RunningClusterConfigGenerator.config` with ``pg_hba.conf`` and ``pg_ident.conf`` content. .. note:: This function only defines ``postgresql.pg_hba`` and ``postgresql.pg_ident`` when ``hba_file`` and ``ident_file`` are set to the defaults. It may happen these files are located outside of ``PGDATA`` and Patroni doesn't have write permissions for them. :raises: :exc:`~patroni.exceptions.PatroniException`: if :exc:`OSError` occured during the conf files handling. 
""" default_hba_path = os.path.join(self.config['postgresql']['data_dir'], 'pg_hba.conf') if self.config['postgresql']['parameters']['hba_file'] == default_hba_path: try: self.config['postgresql']['pg_hba'] = list( filter(lambda i: i and i.split()[0] in self._get_hba_conn_types, read_stripped(default_hba_path))) except OSError as err: raise PatroniException(f'Failed to read pg_hba.conf: {err}') default_ident_path = os.path.join(self.config['postgresql']['data_dir'], 'pg_ident.conf') if self.config['postgresql']['parameters']['ident_file'] == default_ident_path: try: self.config['postgresql']['pg_ident'] = [i for i in read_stripped(default_ident_path) if i and not i.startswith('#')] except OSError as err: raise PatroniException(f'Failed to read pg_ident.conf: {err}') if not self.config['postgresql']['pg_ident']: del self.config['postgresql']['pg_ident'] def _enrich_config_from_running_instance(self) -> None: """Extend :attr:`~RunningClusterConfigGenerator.config` with the values gathered from the running instance. Retrieve the following information from the running PostgreSQL instance: * superuser auth parameters (see :meth:`~RunningClusterConfigGenerator._set_su_params`); * some GUC values (see :meth:`~RunningClusterConfigGenerator._set_pg_params`); * ``postgresql.connect_address``, ``postgresql.listen``; * ``postgresql.pg_hba`` and ``postgresql.pg_ident`` (see :meth:`~RunningClusterConfigGenerator._set_conf_files`) And redefine ``scope`` with the ``cluster_name`` GUC value if set. :raises: :exc:`~patroni.exceptions.PatroniException`: if the provided user doesn't have superuser privileges. 
""" self._set_su_params() with self._get_connection_cursor() as cur: self.pg_major = getattr(cur.connection, 'server_version', 0) if not parse_bool(cur.connection.info.parameter_status('is_superuser')): raise PatroniException('The provided user does not have superuser privilege') self._set_pg_params(cur) self._set_conf_files() def generate(self) -> None: """Generate config using the info gathered from the specified running PG instance. Result is written to :attr:`~RunningClusterConfigGenerator.config`. """ if self.dsn: self.parsed_dsn = parse_dsn(self.dsn) or {} if not self.parsed_dsn: raise PatroniException('Failed to parse DSN string') self._enrich_config_from_running_instance() self.config['postgresql']['bin_dir'] = self._get_bin_dir_from_running_instance() def generate_config(output_file: str, sample: bool, dsn: Optional[str]) -> None: """Generate Patroni configuration file. :param output_file: Full path to the configuration file to be used. If not provided, result is sent to ``stdout``. :param sample: Optional flag. If set, no source instance will be used - generate config with some sane defaults. :param dsn: Optional DSN string for the local instance to get GUC values from. """ try: if sample: config_generator = SampleConfigGenerator(output_file) else: config_generator = RunningClusterConfigGenerator(output_file, dsn) config_generator.write_config() except PatroniException as e: sys.exit(str(e)) except Exception as e: sys.exit(f'Unexpected exception: {e}') patroni-3.2.2/patroni/ctl.py000066400000000000000000003116031455170150700160130ustar00rootroot00000000000000"""Implement ``patronictl``: a command-line application which utilises the REST API to perform cluster operations. :var CONFIG_DIR_PATH: path to Patroni configuration directory as per :func:`click.get_app_dir` output. :var CONFIG_FILE_PATH: default path to ``patronictl.yaml`` configuration file. :var DCS_DEFAULTS: auxiliary dictionary to build the DCS section of the configuration file. 
Mainly used to help parsing ``--dcs-url`` command-line option of ``patronictl``. .. note:: Most of the ``patronictl`` commands (``restart``/``reinit``/``pause``/``resume``/``show-config``/``edit-config`` and similar) require the ``group`` argument and work only for that specific Citus ``group``. If not specified in the command line the ``group`` might be taken from the configuration file. If it is also missing in the configuration file we assume that this is just a normal Patroni cluster (not Citus). """ import click import codecs import copy import datetime import dateutil.parser import dateutil.tz import difflib import io import json import logging import os import random import shutil import subprocess import sys import tempfile import urllib3 import time import yaml from collections import defaultdict from contextlib import contextmanager from prettytable import ALL, FRAME, PrettyTable from urllib.parse import urlparse from typing import Any, Dict, Iterator, List, Optional, Union, Tuple, TYPE_CHECKING if TYPE_CHECKING: # pragma: no cover from psycopg import Cursor from psycopg2 import cursor try: from ydiff import markup_to_pager, PatchStream # pyright: ignore [reportMissingModuleSource] except ImportError: # pragma: no cover from cdiff import markup_to_pager, PatchStream # pyright: ignore [reportMissingModuleSource] from .config import Config, get_global_config from .dcs import get_dcs as _get_dcs, AbstractDCS, Cluster, Member from .exceptions import PatroniException from .postgresql.misc import postgres_version_to_int from .utils import cluster_as_json, patch_config, polling_loop from .request import PatroniRequest from .version import __version__ CONFIG_DIR_PATH = click.get_app_dir('patroni') CONFIG_FILE_PATH = os.path.join(CONFIG_DIR_PATH, 'patronictl.yaml') DCS_DEFAULTS: Dict[str, Dict[str, Any]] = { 'zookeeper': {'port': 2181, 'template': "zookeeper:\n hosts: ['{host}:{port}']"}, 'exhibitor': {'port': 8181, 'template': "exhibitor:\n hosts: [{host}]\n 
port: {port}"}, 'consul': {'port': 8500, 'template': "consul:\n host: '{host}:{port}'"}, 'etcd': {'port': 2379, 'template': "etcd:\n host: '{host}:{port}'"}, 'etcd3': {'port': 2379, 'template': "etcd3:\n host: '{host}:{port}'"}} class PatroniCtlException(click.ClickException): """Raised upon issues faced by ``patronictl`` utility.""" pass class PatronictlPrettyTable(PrettyTable): """Utilitary class to print pretty tables. Extend :class:`~prettytable.PrettyTable` to make it print custom information in the header line. The idea is to print a header line like this: ``` + Cluster: batman --------+--------+---------+----+-----------+ ``` Instead of the default header line which would contain only dash and plus characters. """ def __init__(self, header: str, *args: Any, **kwargs: Any) -> None: """Create a :class:`PatronictlPrettyTable` instance with the given *header*. :param header: custom string to be put in the first header line of the table. :param args: positional arguments to be passed to :class:`~prettytable.PrettyTable` constructor. :param kwargs: keyword arguments to be passed to :class:`~prettytable.PrettyTable` constructor. """ super(PatronictlPrettyTable, self).__init__(*args, **kwargs) self.__table_header = header self.__hline_num = 0 self.__hline: str def __build_header(self, line: str) -> str: """Build the custom header line for the table. .. note:: Expected to be called only against the very first header line of the table. :param line: the original header line. :returns: the modified header line. """ header = self.__table_header[:len(line) - 2] return "".join([line[0], header, line[1 + len(header):]]) def _stringify_hrule(self, *args: Any, **kwargs: Any) -> str: """Get the string representation of a header line. Inject the custom header line, if processing the first header line. .. note:: New implementation for injecting a custom header line, which is used from :mod:`prettytable` 2.2.0 onwards. :returns: string representation of a header line. 
""" ret = super(PatronictlPrettyTable, self)._stringify_hrule(*args, **kwargs) where = args[1] if len(args) > 1 else kwargs.get('where') if where == 'top_' and self.__table_header: ret = self.__build_header(ret) self.__hline_num += 1 return ret def _is_first_hline(self) -> bool: """Check if the current line being processed is the very first line of the header. :returns: ``True`` if processing the first header line, ``False`` otherwise. """ return self.__hline_num == 0 def _set_hline(self, value: str) -> None: """Set header line string representation. :param value: string representing a header line. """ self.__hline = value def _get_hline(self) -> str: """Get string representation of a header line. Inject the custom header line, if processing the first header line. .. note:: Original implementation for injecting a custom header line, and is used up to :mod:`prettytable` 2.2.0. From :mod:`prettytable` 2.2.0 onwards :func:`_stringify_hrule` is used instead. :returns: string representing a header line. """ ret = self.__hline # Inject nice table header if self._is_first_hline() and self.__table_header: ret = self.__build_header(ret) self.__hline_num += 1 return ret _hrule = property(_get_hline, _set_hline) def parse_dcs(dcs: Optional[str]) -> Optional[Dict[str, Any]]: """Parse a DCS URL. :param dcs: the DCS URL in the format ``DCS://HOST:PORT/NAMESPACE``. ``DCS`` can be one among: * ``consul`` * ``etcd`` * ``etcd3`` * ``exhibitor`` * ``zookeeper`` If ``DCS`` is not specified, assume ``etcd`` by default. If ``HOST`` is not specified, assume ``localhost`` by default. If ``PORT`` is not specified, assume the default port of the given ``DCS``. If ``NAMESPACE`` is not specified, use whatever is in config. :returns: ``None`` if *dcs* is ``None``, otherwise a dictionary. The dictionary represents *dcs* as if it were parsed from the Patroni configuration file. Additionally, if a namespace is specified in *dcs*, return a ``namespace`` key with the parsed value. 
:raises: :class:`PatroniCtlException`: if the DCS name in *dcs* is not valid. :Example: >>> parse_dcs('') {'etcd': {'host': 'localhost:2379'}} >>> parse_dcs('etcd://:2399') {'etcd': {'host': 'localhost:2399'}} >>> parse_dcs('etcd://test') {'etcd': {'host': 'test:2379'}} >>> parse_dcs('etcd3://random.com:2399') {'etcd3': {'host': 'random.com:2399'}} >>> parse_dcs('etcd3://random.com:2399/customnamespace') {'etcd3': {'host': 'random.com:2399'}, 'namespace': '/customnamespace'} """ if dcs is None: return None elif '//' not in dcs: dcs = '//' + dcs parsed = urlparse(dcs) scheme = parsed.scheme port = int(parsed.port) if parsed.port else None if scheme == '': scheme = ([k for k, v in DCS_DEFAULTS.items() if v['port'] == port] or ['etcd'])[0] elif scheme not in DCS_DEFAULTS: raise PatroniCtlException('Unknown dcs scheme: {}'.format(scheme)) default = DCS_DEFAULTS[scheme] ret = yaml.safe_load(default['template'].format(host=parsed.hostname or 'localhost', port=port or default['port'])) if parsed.path and parsed.path.strip() != '/': ret['namespace'] = parsed.path.strip() return ret def load_config(path: str, dcs_url: Optional[str]) -> Dict[str, Any]: """Load configuration file from *path* and optionally override its DCS configuration with *dcs_url*. :param path: path to the configuration file. :param dcs_url: the DCS URL in the format ``DCS://HOST:PORT/NAMESPACE``, e.g. ``etcd3://random.com:2399/service``. If given, override whatever DCS and ``namespace`` that are set in the configuration file. See :func:`parse_dcs` for more information. :returns: a dictionary representing the configuration. :raises: :class:`PatroniCtlException`: if *path* does not exist or is not readable. """ if not (os.path.exists(path) and os.access(path, os.R_OK)): if path != CONFIG_FILE_PATH: # bail if non-default config location specified but file not found / readable raise PatroniCtlException('Provided config file {0} not existing or no read rights.' 
' Check the -c/--config-file parameter'.format(path)) else: logging.debug('Ignoring configuration file "%s". It does not exists or is not readable.', path) else: logging.debug('Loading configuration from file %s', path) config = Config(path, validator=None).copy() dcs_kwargs = parse_dcs(dcs_url) or {} if dcs_kwargs: for d in DCS_DEFAULTS: config.pop(d, None) config.update(dcs_kwargs) return config option_format = click.option('--format', '-f', 'fmt', help='Output format', default='pretty', type=click.Choice(['pretty', 'tsv', 'json', 'yaml', 'yml'])) option_watchrefresh = click.option('-w', '--watch', type=float, help='Auto update the screen every X seconds') option_watch = click.option('-W', is_flag=True, help='Auto update the screen every 2 seconds') option_force = click.option('--force', is_flag=True, help='Do not ask for confirmation at any point') arg_cluster_name = click.argument('cluster_name', required=False, default=lambda: click.get_current_context().obj.get('scope')) option_default_citus_group = click.option('--group', required=False, type=int, help='Citus group', default=lambda: click.get_current_context().obj.get('citus', {}).get('group')) option_citus_group = click.option('--group', required=False, type=int, help='Citus group') role_choice = click.Choice(['leader', 'primary', 'standby-leader', 'replica', 'standby', 'any', 'master']) @click.group(cls=click.Group) @click.option('--config-file', '-c', help='Configuration file', envvar='PATRONICTL_CONFIG_FILE', default=CONFIG_FILE_PATH) @click.option('--dcs-url', '--dcs', '-d', 'dcs_url', help='The DCS connect url', envvar='DCS_URL') @click.option('-k', '--insecure', is_flag=True, help='Allow connections to SSL sites without certs') @click.pass_context def ctl(ctx: click.Context, config_file: str, dcs_url: Optional[str], insecure: bool) -> None: """Command-line interface for interacting with Patroni. \f Entry point of ``patronictl`` utility. Load the configuration file. .. 
note::
        Besides *dcs_url* and *insecure*, which are used to override DCS configuration section and ``ctl.insecure``
        setting, you can also override the value of ``log.level``, by default ``WARNING``, through either of these
        environment variables:

        * ``LOGLEVEL``
        * ``PATRONI_LOGLEVEL``
        * ``PATRONI_LOG_LEVEL``

    :param ctx: click context to be passed to sub-commands.
    :param config_file: path to the configuration file.
    :param dcs_url: the DCS URL in the format ``DCS://HOST:PORT``, e.g. ``etcd3://random.com:2399``. If given override
        whatever DCS is set in the configuration file.
    :param insecure: if ``True`` allow SSL connections without client certificates. Override what is configured
        through ``ctl.insecure`` in the configuration file.
    """
    # Environment variables are checked in order, so ``PATRONI_LOG_LEVEL`` has the highest precedence.
    level = 'WARNING'
    for name in ('LOGLEVEL', 'PATRONI_LOGLEVEL', 'PATRONI_LOG_LEVEL'):
        level = os.environ.get(name, level)

    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=level)
    logging.captureWarnings(True)  # Capture eventual SSL warning

    ctx.obj = load_config(config_file, dcs_url)
    # backward compatibility for configuration file where ctl section is not defined
    ctx.obj.setdefault('ctl', {})['insecure'] = ctx.obj.get('ctl', {}).get('insecure') or insecure


def get_dcs(config: Dict[str, Any], scope: str, group: Optional[int]) -> AbstractDCS:
    """Get the DCS object.

    :param config: Patroni configuration.
    :param scope: cluster name.
    :param group: if *group* is defined, use it to select which alternative Citus group this DCS refers to. If *group*
        is ``None`` and a Citus configuration exists, assume this is the coordinator. Coordinator has the group ``0``.
        Refer to the module note for more details.

    :returns: a subclass of :class:`~patroni.dcs.AbstractDCS`, according to the DCS technology that is configured.

    :raises:
        :class:`PatroniCtlException`: if not suitable DCS configuration could be found.
    """
    config.update({'scope': scope, 'patronictl': True})
    if group is not None:
        config['citus'] = {'group': group}
    config.setdefault('name', scope)
    try:
        dcs = _get_dcs(config)
        if config.get('citus') and group is None:
            dcs.is_citus_coordinator = lambda: True
        return dcs
    except PatroniException as e:
        raise PatroniCtlException(str(e))


def request_patroni(member: Member, method: str = 'GET',
                    endpoint: Optional[str] = None, data: Optional[Any] = None) -> urllib3.response.HTTPResponse:
    """Perform a request to Patroni REST API.

    :param member: DCS member, used to get the base URL of its REST API server.
    :param method: HTTP method to be used, e.g. ``GET``.
    :param endpoint: URL path of the request, e.g. ``patroni``.
    :param data: anything to be used as the request body.

    :returns: the response for the request.
    """
    ctx = click.get_current_context()  # the current click context
    # The request executor is cached in the click context so all sub-commands share one ``PatroniRequest``.
    request_executor = ctx.obj.get('__request_patroni')
    if not request_executor:
        request_executor = ctx.obj['__request_patroni'] = PatroniRequest(ctx.obj)
    return request_executor(member, method, endpoint, data)


def print_output(columns: Optional[List[str]], rows: List[List[Any]], alignment: Optional[Dict[str, str]] = None,
                 fmt: str = 'pretty', header: str = '', delimiter: str = '\t') -> None:
    """Print tabular information.

    :param columns: list of column names.
    :param rows: list of rows. Each item is a list of values for the columns.
    :param alignment: alignment to be applied to column values. Each key is the name of a column to be aligned, and
        the corresponding value can be one among:

        * ``l``: left-aligned
        * ``c``: center-aligned
        * ``r``: right-aligned

        A key in the dictionary is only required for a column that needs a specific alignment. Only apply when *fmt*
        is either ``pretty`` or ``topology``.
    :param fmt: the printing format. Can be one among:

        * ``json``: to print as a JSON string -- array of objects;
        * ``yaml`` or ``yml``: to print as a YAML string;
        * ``tsv``: to print a table of separated values, by default by tab;
        * ``pretty``: to print a pretty table;
        * ``topology``: similar to *pretty*, but with a topology view when printing cluster members.

    :param header: a string to be included in the first line of the table header, typically the cluster name. Only
        apply when *fmt* is either ``pretty`` or ``topology``.
    :param delimiter: the character to be used as delimiter when *fmt* is ``tsv``.
    """
    if fmt in {'json', 'yaml', 'yml'}:
        elements = [{k: v for k, v in zip(columns or [], r) if not header or str(v)} for r in rows]
        func = json.dumps if fmt == 'json' else format_config_for_editing
        click.echo(func(elements))
    elif fmt in {'pretty', 'tsv', 'topology'}:
        list_cluster = bool(header and columns and columns[0] == 'Cluster')
        if list_cluster and columns and 'Tags' in columns:  # we want to format member tags as YAML
            i = columns.index('Tags')
            for row in rows:
                if row[i]:
                    # Member tags are printed in YAML block format if *fmt* is ``pretty``. If *fmt* is either ``tsv``
                    # or ``topology``, then write in the YAML flow format, which is similar to JSON
                    row[i] = format_config_for_editing(row[i], fmt != 'pretty').strip()
        if list_cluster and header and fmt != 'tsv':  # skip cluster name and maybe Citus group if pretty-printing
            skip_cols = 2 if ' (group: ' in header else 1
            columns = columns[skip_cols:] if columns else []
            rows = [row[skip_cols:] for row in rows]

        # In ``tsv`` format print cluster name in every row as the first column
        if fmt == 'tsv':
            for r in ([columns] if columns else []) + rows:
                click.echo(delimiter.join(map(str, r)))
        # In ``pretty`` and ``topology`` formats print the cluster name only once, in the very first header line
        else:
            # If any value is multi-line, then add horizontal rules between all table rows while printing to get a
            # clear visual separation of rows.
            hrules = ALL if any(any(isinstance(c, str) and '\n' in c for c in r) for r in rows) else FRAME
            table = PatronictlPrettyTable(header, columns, hrules=hrules)
            table.align = 'l'
            for k, v in (alignment or {}).items():
                table.align[k] = v
            for r in rows:
                table.add_row(r)
            click.echo(table)


def watching(w: bool, watch: Optional[int], max_count: Optional[int] = None, clear: bool = True) -> Iterator[int]:
    """Yield a value every ``x`` seconds.

    Used to run a command with a watch-based approach.

    :param w: if ``True`` and *watch* is ``None``, then *watch* assumes the value ``2``.
    :param watch: amount of seconds to wait before yielding another value.
    :param max_count: maximum number of yielded values. If ``None`` keep yielding values indefinitely.
    :param clear: if the screen should be cleared out at each iteration.

    :yields: ``0`` each time *watch* seconds have passed.

    :Example:

        >>> len(list(watching(True, 1, 0)))
        1

        >>> len(list(watching(True, 1, 1)))
        2

        >>> len(list(watching(True, None, 0)))
        1
    """
    if w and not watch:
        watch = 2
    if watch and clear:
        click.clear()
    # The first value is always yielded immediately, before any sleeping.
    yield 0

    if max_count is not None and max_count < 1:
        return

    counter = 1
    while watch and counter <= (max_count or counter):
        time.sleep(watch)
        counter += 1
        if clear:
            click.clear()
        yield 0


def get_all_members(obj: Dict[str, Any], cluster: Cluster, group: Optional[int],
                    role: str = 'leader') -> Iterator[Member]:
    """Get all cluster members that have the given *role*.

    :param obj: the Patroni configuration.
    :param cluster: the Patroni cluster.
    :param group: filter which Citus group we should get members from. If ``None`` get from all groups.
    :param role: role to filter members. Can be one among:

        * ``primary`` or ``master``: the primary PostgreSQL instance;
        * ``replica`` or ``standby``: a standby PostgreSQL instance;
        * ``leader``: the leader of a Patroni cluster.
Can also be used to get the leader of a Patroni standby cluster;
        * ``standby-leader``: the leader of a Patroni standby cluster;
        * ``any``: matches any node independent of its role.

    :yields: members that have the given *role*.
    """
    clusters = {0: cluster}
    if obj.get('citus') and group is None:
        clusters.update(cluster.workers)
    if role in ('leader', 'master', 'primary', 'standby-leader'):
        # In the DCS the members' role can be one among: ``primary``, ``master``, ``replica`` or ``standby_leader``.
        # ``primary`` and ``master`` are the same thing, so we map both to ``master`` to have a simpler ``if``.
        # In a future release we might remove ``master`` from the available roles for the DCS members.
        role = {'primary': 'master', 'standby-leader': 'standby_leader'}.get(role, role)
        for cluster in clusters.values():
            if cluster.leader is not None and cluster.leader.name and \
                    (role == 'leader'
                     or cluster.leader.data.get('role') != 'master' and role == 'standby_leader'
                     or cluster.leader.data.get('role') != 'standby_leader' and role == 'master'):
                yield cluster.leader.member
        return

    for cluster in clusters.values():
        leader_name = (cluster.leader.member.name if cluster.leader else None)
        for m in cluster.members:
            if role == 'any' or role in ('replica', 'standby') and m.name != leader_name:
                yield m


def get_any_member(obj: Dict[str, Any], cluster: Cluster, group: Optional[int],
                   role: Optional[str] = None, member: Optional[str] = None) -> Optional[Member]:
    """Get the first found cluster member that has the given *role*.

    :param obj: the Patroni configuration.
    :param cluster: the Patroni cluster.
    :param group: filter which Citus group we should get members from. If ``None`` get from all groups.
    :param role: role to filter members. See :func:`get_all_members` for available options.
    :param member: if specified, then besides having the given *role*, the cluster member's name should be *member*.

    :returns: the first found cluster member that has the given *role*.

    :raises:
        :class:`PatroniCtlException`: if both *role* and *member* are provided.
    """
    if member is not None:
        if role is not None:
            raise PatroniCtlException('--role and --member are mutually exclusive options')
        role = 'any'
    elif role is None:
        role = 'leader'

    for m in get_all_members(obj, cluster, group, role):
        if member is None or m.name == member:
            return m


def get_all_members_leader_first(cluster: Cluster) -> Iterator[Member]:
    """Get all cluster members, with the cluster leader being yielded first.

    .. note::
        Only yield members that have a ``restapi.connect_address`` configured.

    :yields: all cluster members, with the leader first.
    """
    leader_name = cluster.leader.member.name if cluster.leader and cluster.leader.member.api_url else None
    if leader_name and cluster.leader:
        yield cluster.leader.member
    for member in cluster.members:
        if member.api_url and member.name != leader_name:
            yield member


def get_cursor(obj: Dict[str, Any], cluster: Cluster, group: Optional[int], connect_parameters: Dict[str, Any],
               role: Optional[str] = None, member_name: Optional[str] = None) -> Union['cursor', 'Cursor[Any]', None]:
    """Get a cursor object to execute queries against a member that has the given *role* or *member_name*.

    .. note::
        Besides what is passed through *connect_parameters*, this function also sets the following parameters:

        * ``fallback_application_name``: as ``Patroni ctl``;
        * ``connect_timeout``: as ``5``.

    :param obj: the Patroni configuration.
    :param cluster: the Patroni cluster.
    :param group: filter which Citus group we should get members to create a cursor against. If ``None`` consider
        members from all groups.
    :param connect_parameters: database connection parameters.
    :param role: role to filter members. See :func:`get_all_members` for available options.
    :param member_name: if specified, then besides having the given *role*, the cluster member's name should be
        *member_name*.

    :returns: a cursor object to execute queries against the database. Can be either:

        * A :class:`psycopg.Cursor` if using :mod:`psycopg`; or
        * A :class:`psycopg2.extensions.cursor` if using :mod:`psycopg2`;
        * ``None`` if not able to get a cursor that satisfies *role* and *member_name*.
    """
    member = get_any_member(obj, cluster, group, role=role, member=member_name)
    if member is None:
        return None

    params = member.conn_kwargs(connect_parameters)
    params.update({'fallback_application_name': 'Patroni ctl', 'connect_timeout': '5'})
    if 'dbname' in connect_parameters:
        params['dbname'] = connect_parameters['dbname']
    else:
        params.pop('dbname')

    from . import psycopg
    conn = psycopg.connect(**params)
    cursor = conn.cursor()
    # If we want ``any`` node we are fine to return the cursor. ``None`` is similar to ``any`` at this point, as it's
    # been dealt with through :func:`get_any_member`.
    # If we want the Patroni leader node, :func:`get_any_member` already checks that for us
    if role in (None, 'any', 'leader'):
        return cursor

    # If we want something other than ``any`` or ``leader``, then we do not rely only on the DCS information about
    # members, but rather double check the underlying Postgres status.
    cursor.execute('SELECT pg_catalog.pg_is_in_recovery()')
    row = cursor.fetchone()
    in_recovery = not row or row[0]

    if in_recovery and role in ('replica', 'standby', 'standby-leader') \
            or not in_recovery and role in ('master', 'primary'):
        return cursor

    conn.close()

    return None


def get_members(obj: Dict[str, Any], cluster: Cluster, cluster_name: str, member_names: List[str],
                role: str, force: bool, action: str, ask_confirmation: bool = True,
                group: Optional[int] = None) -> List[Member]:
    """Get the list of members based on the given filters.

    .. note::
        Contains some filtering and checks processing that are common to several actions that are exposed by
        ``patronictl``, like:

        * Get members of *cluster* that respect the given *member_names*, *role*, and *group*;
        * Bypass confirmations;
        * Prompt user for information that has not been passed through the command-line options;
        * etc.

        Designed to handle both attended and unattended ``patronictl`` commands execution that need to retrieve and
        validate the members before doing anything.

        In the very end may call :func:`confirm_members_action` to ask if the user would like to proceed with
        *action* over the retrieved members. That won't actually perform the action, but it works as the "last
        confirmation" before the *action* is processed by the caller method. Additional checks can also be
        implemented in the caller method, in which case you might want to pass ``ask_confirmation=False``, and later
        call :func:`confirm_members_action` manually in the caller method. That way the workflow won't look broken to
        the user that is interacting with ``patronictl``.

    :param obj: Patroni configuration.
    :param cluster: Patroni cluster.
    :param cluster_name: name of the Patroni cluster.
    :param member_names: used to filter which members should take the *action* based on their names. Each item is the
        name of a Patroni member, as per ``name`` configuration. If *member_names* is an empty :class:`tuple` no
        filters are applied based on names.
    :param role: used to filter which members should take the *action* based on their role. See
        :func:`get_all_members` for available options.
    :param force: if ``True``, then it won't ask for confirmations at any point nor prompt the user to select values
        for options that were not specified through the command-line.
    :param action: the action that is being processed, one among:

        * ``reload``: reload PostgreSQL configuration; or
        * ``restart``: restart PostgreSQL; or
        * ``reinitialize``: reinitialize PostgreSQL data directory; or
        * ``flush``: discard scheduled actions.
:param ask_confirmation: if ``False``, then it won't ask for the final confirmation regarding the *action* before
        returning the list of members. Usually useful as ``False`` if you want to perform additional checks in the
        caller method besides the checks that are performed through this generic method.
    :param group: filter which Citus group we should get members from. If ``None`` consider members from all groups.

    :returns: a list of members that respect the given filters.

    :raises:
        :class:`PatroniCtlException`: if

            * Cluster does not have members that match the given *role*; or
            * Cluster does not have members that match the given *member_names*; or
            * No member with given *role* is found among the specified *member_names*.
    """
    members = list(get_all_members(obj, cluster, group, role))

    candidates = {m.name for m in members}
    if not force or role:
        if not member_names and not candidates:
            raise PatroniCtlException('{0} cluster doesn\'t have any members'.format(cluster_name))
        output_members(obj, cluster, cluster_name, group=group)

    if member_names:
        # Keep only the requested names that actually match a member with the given *role*.
        member_names = list(set(member_names) & candidates)
        if not member_names:
            raise PatroniCtlException('No {0} among provided members'.format(role))
    elif action != 'reinitialize':
        member_names = list(candidates)

    if not member_names and not force:
        member_names = [click.prompt('Which member do you want to {0} [{1}]?'.format(action,
                        ', '.join(candidates)), type=str, default='')]

    for member_name in member_names:
        if member_name not in candidates:
            raise PatroniCtlException('{0} is not a member of cluster'.format(member_name))

    members = [m for m in members if m.name in member_names]
    if ask_confirmation:
        confirm_members_action(members, force, action)
    return members


def confirm_members_action(members: List[Member], force: bool, action: str,
                           scheduled_at: Optional[datetime.datetime] = None) -> None:
    """Ask for confirmation if *action* should be taken by *members*.

    :param members: list of member which will take the *action*.
    :param force: if ``True`` skip the confirmation prompt and allow the *action* to proceed.
    :param action: the action that is being processed, one among:

        * ``reload``: reload PostgreSQL configuration; or
        * ``restart``: restart PostgreSQL; or
        * ``reinitialize``: reinitialize PostgreSQL data directory; or
        * ``flush``: discard scheduled actions.

    :param scheduled_at: timestamp at which the *action* should be scheduled to. If ``None`` *action* is taken
        immediately.

    :raises:
        :class:`PatroniCtlException`: if the user aborted the *action*.
    """
    if scheduled_at:
        if not force:
            confirm = click.confirm('Are you sure you want to schedule {0} of members {1} at {2}?'
                                    .format(action, ', '.join([m.name for m in members]), scheduled_at))
            if not confirm:
                raise PatroniCtlException('Aborted scheduled {0}'.format(action))
    else:
        if not force:
            confirm = click.confirm('Are you sure you want to {0} members {1}?'
                                    .format(action, ', '.join([m.name for m in members])))
            if not confirm:
                raise PatroniCtlException('Aborted {0}'.format(action))


@ctl.command('dsn', help='Generate a dsn for the provided member, defaults to a dsn of the leader')
@click.option('--role', '-r', help='Give a dsn of any member with this role', type=role_choice, default=None)
@click.option('--member', '-m', help='Generate a dsn for this member', type=str)
@arg_cluster_name
@option_citus_group
@click.pass_obj
def dsn(obj: Dict[str, Any], cluster_name: str, group: Optional[int],
        role: Optional[str], member: Optional[str]) -> None:
    """Process ``dsn`` command of ``patronictl`` utility.

    Get DSN to connect to *member*.

    .. note::
        If no *role* nor *member* is given assume *role* as ``leader``.

    :param obj: Patroni configuration.
    :param cluster_name: name of the Patroni cluster.
    :param group: filter which Citus group we should get members to get DSN from. Refer to the module note for more
        details.
    :param role: filter which members to get DSN from based on their role. See :func:`get_all_members` for available
        options.
    :param member: filter which member to get DSN from based on its name.

    :raises:
        :class:`PatroniCtlException`: if

            * both *role* and *member* are provided; or
            * No member matches requested *member* or *role*.
    """
    cluster = get_dcs(obj, cluster_name, group).get_cluster()
    m = get_any_member(obj, cluster, group, role=role, member=member)
    if m is None:
        raise PatroniCtlException('Can not find a suitable member')

    params = m.conn_kwargs()
    click.echo('host={host} port={port}'.format(**params))


@ctl.command('query', help='Query a Patroni PostgreSQL member')
@arg_cluster_name
@option_citus_group
@click.option('--format', 'fmt', help='Output format (pretty, tsv, json, yaml)', default='tsv')
@click.option('--file', '-f', 'p_file', help='Execute the SQL commands from this file', type=click.File('rb'))
@click.option('--password', help='force password prompt', is_flag=True)
@click.option('-U', '--username', help='database user name', type=str)
@option_watch
@option_watchrefresh
@click.option('--role', '-r', help='The role of the query', type=role_choice, default=None)
@click.option('--member', '-m', help='Query a specific member', type=str)
@click.option('--delimiter', help='The column delimiter', default='\t')
@click.option('--command', '-c', help='The SQL commands to execute')
@click.option('-d', '--dbname', help='database name to connect to', type=str)
@click.pass_obj
def query(
        obj: Dict[str, Any],
        cluster_name: str,
        group: Optional[int],
        role: Optional[str],
        member: Optional[str],
        w: bool,
        watch: Optional[int],
        delimiter: str,
        command: Optional[str],
        p_file: Optional[io.BufferedReader],
        password: Optional[bool],
        username: Optional[str],
        dbname: Optional[str],
        fmt: str = 'tsv'
) -> None:
    """Process ``query`` command of ``patronictl`` utility.

    Perform a Postgres query in a Patroni node.

    :param obj: Patroni configuration.
    :param cluster_name: name of the Patroni cluster.
    :param group: filter which Citus group we should get members from to perform the query. Refer to the module note
        for more details.
    :param role: filter which members to perform the query against based on their role. See :func:`get_all_members`
        for available options.
    :param member: filter which member to perform the query against based on its name.
    :param w: perform query with watch-based approach every 2 seconds.
    :param watch: perform query with watch-based approach every *watch* seconds.
    :param delimiter: column delimiter when *fmt* is ``tsv``.
    :param command: SQL query to execute.
    :param p_file: path to file containing SQL query to execute.
    :param password: if ``True`` then prompt for password.
    :param username: name of the database user.
    :param dbname: name of the database.
    :param fmt: the output table printing format. See :func:`print_output` for available options.

    :raises:
        :class:`PatroniCtlException`: if:

            * both *role* and *member* are provided; or
            * both *file* and *command* are provided; or
            * neither *file* nor *command* is provided.
    """
    if p_file is not None:
        if command is not None:
            raise PatroniCtlException('--file and --command are mutually exclusive options')
        sql = p_file.read().decode('utf-8')
    else:
        if command is None:
            raise PatroniCtlException('You need to specify either --command or --file')
        sql = command

    connect_parameters: Dict[str, str] = {}
    if username:
        connect_parameters['username'] = username
    if password:
        connect_parameters['password'] = click.prompt('Password', hide_input=True, type=str)
    if dbname:
        connect_parameters['dbname'] = dbname

    dcs = get_dcs(obj, cluster_name, group)

    cluster = cursor = None
    for _ in watching(w, watch, clear=False):
        if cluster is None:
            cluster = dcs.get_cluster()

        # cursor = get_cursor(obj, cluster, group, connect_parameters, role=role, member=member)
        output, header = query_member(obj, cluster, group, cursor, member, role, sql, connect_parameters)
        print_output(header, output, fmt=fmt, delimiter=delimiter)


def query_member(obj: Dict[str, Any], cluster: Cluster, group: Optional[int],
                 cursor: Union['cursor', 'Cursor[Any]', None], member: Optional[str], role: Optional[str],
                 command: str, connect_parameters: Dict[str, Any]) -> Tuple[List[List[Any]], Optional[List[Any]]]:
    """Execute SQL *command* against a member.

    :param obj: Patroni configuration.
    :param cluster: the Patroni cluster.
    :param group: filter which Citus group we should get members from to perform the query. Refer to the module note
        for more details.
    :param cursor: cursor through which *command* is executed. If ``None`` a new cursor is instantiated through
        :func:`get_cursor`.
    :param member: filter which member to create a cursor against based on its name, if *cursor* is ``None``.
    :param role: filter which member to create a cursor against based on their role, if *cursor* is ``None``. See
        :func:`get_all_members` for available options.
    :param command: SQL command to be executed.
    :param connect_parameters: connection parameters to be passed down to :func:`get_cursor`, if *cursor* is
        ``None``.

    :returns: a tuple composed of two items:

        * List of rows returned by the executed *command*;
        * List of columns related to the rows returned by the executed *command*.

        If an error occurs while executing *command*, then returns the following values in the tuple:

        * List with 2 items:

            * Current timestamp;
            * Error message.

        * ``None``.
    """
    from . import psycopg

    try:
        if cursor is None:
            cursor = get_cursor(obj, cluster, group, connect_parameters, role=role, member_name=member)

        if cursor is None:
            if member is not None:
                message = f'No connection to member {member} is available'
            elif role is not None:
                message = f'No connection to role {role} is available'
            else:
                message = 'No connection is available'
            logging.debug(message)
            return [[timestamp(0), message]], None

        cursor.execute(command.encode('utf-8'))
        return [list(row) for row in cursor], cursor.description and [d.name for d in cursor.description]
    except psycopg.DatabaseError as de:
        logging.debug(de)
        if cursor is not None and not cursor.connection.closed:
            cursor.connection.close()
        message = de.diag.sqlstate or str(de)
        message = message.replace('\n', ' ')
        return [[timestamp(0), 'ERROR, SQLSTATE: {0}'.format(message)]], None


@ctl.command('remove', help='Remove cluster from DCS')
@click.argument('cluster_name')
@option_citus_group
@option_format
@click.pass_obj
def remove(obj: Dict[str, Any], cluster_name: str, group: Optional[int], fmt: str) -> None:
    """Process ``remove`` command of ``patronictl`` utility.

    Remove cluster *cluster_name* from the DCS.

    :param obj: Patroni configuration.
    :param cluster_name: name of the cluster which information will be wiped out of the DCS.
    :param group: which Citus group should have its information wiped out of the DCS. Refer to the module note for
        more details.
    :param fmt: the output table printing format. See :func:`print_output` for available options.

    :raises:
        :class:`PatroniCtlException`: if:

            * Patroni is running on a Citus cluster, but no *group* was specified; or
            * *cluster_name* does not exist; or
            * user did not type the expected confirmation message when prompted for confirmation; or
            * user did not type the correct leader name when requesting removal of a healthy cluster.
""" dcs = get_dcs(obj, cluster_name, group) cluster = dcs.get_cluster() if obj.get('citus') and group is None: raise PatroniCtlException('For Citus clusters the --group must me specified') output_members(obj, cluster, cluster_name, fmt=fmt) confirm = click.prompt('Please confirm the cluster name to remove', type=str) if confirm != cluster_name: raise PatroniCtlException('Cluster names specified do not match') message = 'Yes I am aware' confirm = \ click.prompt('You are about to remove all information in DCS for {0}, please type: "{1}"'.format(cluster_name, message), type=str) if message != confirm: raise PatroniCtlException('You did not exactly type "{0}"'.format(message)) if cluster.leader and cluster.leader.name: confirm = click.prompt('This cluster currently is healthy. Please specify the leader name to continue') if confirm != cluster.leader.name: raise PatroniCtlException('You did not specify the current leader of the cluster') dcs.delete_cluster() def check_response(response: urllib3.response.HTTPResponse, member_name: str, action_name: str, silent_success: bool = False) -> bool: """Check an HTTP response and print a status message. :param response: the response to be checked. :param member_name: name of the member associated with the *response*. :param action_name: action associated with the *response*. :param silent_success: if a status message should be skipped upon a successful *response*. :returns: ``True`` if the response indicates a sucessful operation (HTTP status < ``400``), ``False`` otherwise. """ if response.status >= 400: click.echo('Failed: {0} for member {1}, status code={2}, ({3})'.format( action_name, member_name, response.status, response.data.decode('utf-8') )) return False elif not silent_success: click.echo('Success: {0} for member {1}'.format(action_name, member_name)) return True def parse_scheduled(scheduled: Optional[str]) -> Optional[datetime.datetime]: """Parse a string *scheduled* timestamp as a :class:`~datetime.datetime` object. 
:param scheduled: string representation of the timestamp. May also be ``now``.

    :returns: the corresponding :class:`~datetime.datetime` object, if *scheduled* is not ``now``,
        otherwise ``None``.

    :raises:
        :class:`PatroniCtlException`: if unable to parse *scheduled* from :class:`str` to
        :class:`~datetime.datetime`.

    :Example:

        >>> parse_scheduled(None) is None
        True

        >>> parse_scheduled('now') is None
        True

        >>> parse_scheduled('2023-05-29T04:32:31')
        datetime.datetime(2023, 5, 29, 4, 32, 31, tzinfo=tzlocal())

        >>> parse_scheduled('2023-05-29T04:32:31-3')
        datetime.datetime(2023, 5, 29, 4, 32, 31, tzinfo=tzoffset(None, -10800))
    """
    if scheduled is not None and (scheduled or 'now') != 'now':
        try:
            scheduled_at = dateutil.parser.parse(scheduled)
            if scheduled_at.tzinfo is None:
                # Naive timestamps are interpreted in the local timezone.
                scheduled_at = scheduled_at.replace(tzinfo=dateutil.tz.tzlocal())
        except (ValueError, TypeError):
            message = 'Unable to parse scheduled timestamp ({0}). It should be in an unambiguous format (e.g. ISO 8601)'
            raise PatroniCtlException(message.format(scheduled))
        return scheduled_at

    return None


@ctl.command('reload', help='Reload cluster member configuration')
@click.argument('cluster_name')
@click.argument('member_names', nargs=-1)
@option_citus_group
@click.option('--role', '-r', help='Reload only members with this role', type=role_choice, default='any')
@option_force
@click.pass_obj
def reload(obj: Dict[str, Any], cluster_name: str, member_names: List[str],
           group: Optional[int], force: bool, role: str) -> None:
    """Process ``reload`` command of ``patronictl`` utility.

    Reload configuration of cluster members based on given filters.

    :param obj: Patroni configuration.
    :param cluster_name: name of the Patroni cluster.
    :param member_names: name of the members which configuration should be reloaded.
    :param group: filter which Citus group we should reload members. Refer to the module note for more details.
    :param force: perform the reload without asking for confirmations.
    :param role: role to filter members. See :func:`get_all_members` for available options.
    """
    dcs = get_dcs(obj, cluster_name, group)
    cluster = dcs.get_cluster()

    members = get_members(obj, cluster, cluster_name, member_names, role, force, 'reload', group=group)

    for member in members:
        r = request_patroni(member, 'post', 'reload')
        if r.status == 200:
            click.echo('No changes to apply on member {0}'.format(member.name))
        elif r.status == 202:
            config = get_global_config(cluster)
            click.echo('Reload request received for member {0} and will be processed within {1} seconds'.format(
                member.name, config.get('loop_wait') or dcs.loop_wait)
            )
        else:
            click.echo('Failed: reload for member {0}, status code={1}, ({2})'.format(
                member.name, r.status, r.data.decode('utf-8'))
            )


@ctl.command('restart', help='Restart cluster member')
@click.argument('cluster_name')
@click.argument('member_names', nargs=-1)
@option_citus_group
@click.option('--role', '-r', help='Restart only members with this role', type=role_choice, default='any')
@click.option('--any', 'p_any', help='Restart a single member only', is_flag=True)
@click.option('--scheduled', help='Timestamp of a scheduled restart in unambiguous format (e.g. ISO 8601)',
              default=None)
@click.option('--pg-version', 'version', help='Restart if the PostgreSQL version is less than provided (e.g. 9.5.2)',
              default=None)
@click.option('--pending', help='Restart if pending', is_flag=True)
@click.option('--timeout', help='Return error and fail over if necessary when restarting takes longer than this.')
@option_force
@click.pass_obj
def restart(obj: Dict[str, Any], cluster_name: str, group: Optional[int], member_names: List[str],
            force: bool, role: str, p_any: bool, scheduled: Optional[str], version: Optional[str],
            pending: bool, timeout: Optional[str]) -> None:
    """Process ``restart`` command of ``patronictl`` utility.

    Restart Postgres on cluster members based on given filters.

    :param obj: Patroni configuration.
    :param cluster_name: name of the Patroni cluster.
    :param group: filter which Citus group we should restart members. Refer to the module note for more details.
    :param member_names: name of the members that should be restarted.
    :param force: perform the restart without asking for confirmations.
    :param role: role to filter members. See :func:`get_all_members` for available options.
    :param p_any: restart a single and random member among the ones that match the given filters.
    :param scheduled: timestamp when the restart should be scheduled to occur. If ``now`` restart immediately.
    :param version: restart only members which Postgres version is less than *version*.
    :param pending: restart only members that are flagged as ``pending restart``.
    :param timeout: timeout for the restart operation. If timeout is reached a failover may occur in the cluster.

    :raises:
        :class:`PatroniCtlException`: if:

            * *scheduled* could not be parsed; or
            * *version* could not be parsed; or
            * a restart is attempted against a cluster that is in maintenance mode.
    """
    cluster = get_dcs(obj, cluster_name, group).get_cluster()

    members = get_members(obj, cluster, cluster_name, member_names, role, force, 'restart', False, group=group)
    if scheduled is None and not force:
        next_hour = (datetime.datetime.now() + datetime.timedelta(hours=1)).strftime('%Y-%m-%dT%H:%M')
        scheduled = click.prompt('When should the restart take place (e.g. ' + next_hour + ') ',
                                 type=str, default='now')

    scheduled_at = parse_scheduled(scheduled)
    confirm_members_action(members, force, 'restart', scheduled_at)

    if p_any:
        random.shuffle(members)
        members = members[:1]

    if version is None and not force:
        version = click.prompt('Restart if the PostgreSQL version is less than provided (e.g. 9.5.2) ',
                               type=str, default='')

    content: Dict[str, Any] = {}
    if pending:
        content['restart_pending'] = True

    if version:
        try:
            postgres_version_to_int(version)
        except PatroniException as e:
            raise PatroniCtlException(e.value)

        content['postgres_version'] = version

    if scheduled_at:
        if get_global_config(cluster).is_paused:
            raise PatroniCtlException("Can't schedule restart in the paused state")
        content['schedule'] = scheduled_at.isoformat()

    if timeout is not None:
        content['timeout'] = timeout

    for member in members:
        if 'schedule' in content:
            if force and member.data.get('scheduled_restart'):
                r = request_patroni(member, 'delete', 'restart')
                check_response(r, member.name, 'flush scheduled restart', True)

        r = request_patroni(member, 'post', 'restart', content)
        if r.status == 200:
            click.echo('Success: restart on member {0}'.format(member.name))
        elif r.status == 202:
            click.echo('Success: restart scheduled on member {0}'.format(member.name))
        elif r.status == 409:
            click.echo('Failed: another restart is already scheduled on member {0}'.format(member.name))
        else:
            click.echo('Failed: restart for member {0}, status code={1}, ({2})'.format(
                member.name, r.status, r.data.decode('utf-8'))
            )


@ctl.command('reinit', help='Reinitialize cluster member')
@click.argument('cluster_name')
@option_citus_group
@click.argument('member_names', nargs=-1)
@option_force
@click.option('--wait', help='Wait until reinitialization completes', is_flag=True)
@click.pass_obj
def reinit(obj: Dict[str, Any], cluster_name: str, group: Optional[int],
           member_names: List[str], force: bool, wait: bool) -> None:
    """Process ``reinit`` command of ``patronictl`` utility.

    Reinitialize cluster members based on given filters.

    .. note::
        Only reinitialize replica members, not a leader.

    :param obj: Patroni configuration.
    :param cluster_name: name of the Patroni cluster.
    :param group: filter which Citus group we should reinit members. Refer to the module note for more details.
    :param member_names: name of the members that should be reinitialized.
    :param force: perform the restart without asking for confirmations.
    :param wait: wait for the operation to complete.
    """
    cluster = get_dcs(obj, cluster_name, group).get_cluster()
    members = get_members(obj, cluster, cluster_name, member_names, 'replica', force, 'reinitialize', group=group)

    wait_on_members: List[Member] = []
    for member in members:
        body: Dict[str, bool] = {'force': force}
        while True:
            r = request_patroni(member, 'post', 'reinitialize', body)
            started = check_response(r, member.name, 'reinitialize')
            if not started and r.data.endswith(b' already in progress') \
                    and not force and click.confirm('Do you want to cancel it and reinitialize anyway?'):
                body['force'] = True
                continue
            break
        if started and wait:
            wait_on_members.append(member)

    # ``last_display`` keeps the member list from the previous loop iteration so the "Waiting for ..."
    # message is only re-printed when the set of pending members changes.
    last_display = []
    while wait_on_members:
        if wait_on_members != last_display:
            click.echo('Waiting for reinitialize to complete on: {0}'.format(
                ", ".join(member.name for member in wait_on_members))
            )
            last_display[:] = wait_on_members
        time.sleep(2)
        for member in wait_on_members:
            data = json.loads(request_patroni(member, 'get', 'patroni').data.decode('utf-8'))
            if data.get('state') != 'creating replica':
                click.echo('Reinitialize is completed on: {0}'.format(member.name))
                wait_on_members.remove(member)


def _do_failover_or_switchover(obj: Dict[str, Any], action: str, cluster_name: str, group: Optional[int],
                               switchover_leader: Optional[str], candidate: Optional[str], force: bool,
                               scheduled: Optional[str] = None) -> None:
    """Perform a failover or a switchover operation in the cluster.

    Informational messages are printed in the console during the operation, as well as the list of members before and
    after the operation, so the user can follow the operation status.

    .. note::
        If not able to perform the operation through the REST API, write directly to the DCS as a fall back.

    :param obj: Patroni configuration.
:param action: action to be taken -- ``failover`` or ``switchover``. :param cluster_name: name of the Patroni cluster. :param group: filter Citus group within we should perform a failover or switchover. If ``None``, user will be prompted for filling it -- unless *force* is ``True``, in which case an exception is raised. :param switchover_leader: name of the leader member passed as switchover option. :param candidate: name of a standby member to be promoted. Nodes that are tagged with ``nofailover`` cannot be used. :param force: perform the failover or switchover without asking for confirmations. :param scheduled: timestamp when the switchover should be scheduled to occur. If ``now`` perform immediately. :raises: :class:`PatroniCtlException`: if: * Patroni is running on a Citus cluster, but no *group* was specified; or * a switchover was requested by the cluster has no leader; or * *switchover_leader* does not match the current leader of the cluster; or * cluster has no candidates available for the operation; or * no *candidate* is given for a failover operation; or * current leader and *candidate* are the same; or * *candidate* is tagged as nofailover; or * *candidate* is not a member of the cluster; or * trying to schedule a switchover in a cluster that is in maintenance mode; or * user aborts the operation. 
""" dcs = get_dcs(obj, cluster_name, group) cluster = dcs.get_cluster() click.echo('Current cluster topology') output_members(obj, cluster, cluster_name, group=group) if obj.get('citus') and group is None: if force: raise PatroniCtlException('For Citus clusters the --group must me specified') else: group = click.prompt('Citus group', type=int) dcs = get_dcs(obj, cluster_name, group) cluster = dcs.get_cluster() global_config = get_global_config(cluster) cluster_leader = cluster.leader and cluster.leader.name # leader has to be be defined for switchover only if action == 'switchover': if not cluster_leader: raise PatroniCtlException('This cluster has no leader') if switchover_leader is None: if force: switchover_leader = cluster_leader else: prompt = 'Standby Leader' if global_config.is_standby_cluster else 'Primary' switchover_leader = click.prompt(prompt, type=str, default=cluster_leader) if cluster_leader != switchover_leader: raise PatroniCtlException(f'Member {switchover_leader} is not the leader of cluster {cluster_name}') # excluding members with nofailover tag candidate_names = [str(m.name) for m in cluster.members if m.name != cluster_leader and not m.nofailover] # We sort the names for consistent output to the client candidate_names.sort() if not candidate_names: raise PatroniCtlException('No candidates found to {0} to'.format(action)) if candidate is None and not force: candidate = click.prompt('Candidate ' + str(candidate_names), type=str, default='') if action == 'failover' and not candidate: raise PatroniCtlException('Failover could be performed only to a specific candidate') if candidate and candidate not in candidate_names: if candidate == cluster_leader: raise PatroniCtlException( f'Member {candidate} is already the leader of cluster {cluster_name}') raise PatroniCtlException( f'Member {candidate} does not exist in cluster {cluster_name} or is tagged as nofailover') if all((not force, action == 'failover', global_config.is_synchronous_mode, not 
cluster.sync.is_empty, not cluster.sync.matches(candidate, True))): if not click.confirm(f'Are you sure you want to failover to the asynchronous node {candidate}?'): raise PatroniCtlException('Aborting ' + action) scheduled_at_str = None scheduled_at = None if action == 'switchover': if scheduled is None and not force: next_hour = (datetime.datetime.now() + datetime.timedelta(hours=1)).strftime('%Y-%m-%dT%H:%M') scheduled = click.prompt('When should the switchover take place (e.g. ' + next_hour + ' ) ', type=str, default='now') scheduled_at = parse_scheduled(scheduled) if scheduled_at: if global_config.is_paused: raise PatroniCtlException("Can't schedule switchover in the paused state") scheduled_at_str = scheduled_at.isoformat() failover_value = {'candidate': candidate} if action == 'switchover': failover_value['leader'] = switchover_leader if scheduled_at_str: failover_value['scheduled_at'] = scheduled_at_str logging.debug(failover_value) # By now we have established that the leader exists and the candidate exists if not force: demote_msg = f', demoting current leader {cluster_leader}' if cluster_leader else '' if scheduled_at_str: # only switchover can be scheduled if not click.confirm(f'Are you sure you want to schedule switchover of cluster ' f'{cluster_name} at {scheduled_at_str}{demote_msg}?'): # action as a var to catch a regression in the tests raise PatroniCtlException('Aborting scheduled ' + action) else: if not click.confirm(f'Are you sure you want to {action} cluster {cluster_name}{demote_msg}?'): raise PatroniCtlException('Aborting ' + action) r = None try: member = cluster.leader.member if cluster.leader else candidate and cluster.get_member(candidate, False) if TYPE_CHECKING: # pragma: no cover assert isinstance(member, Member) r = request_patroni(member, 'post', action, failover_value) # probably old patroni, which doesn't support switchover yet if r.status == 501 and action == 'switchover' and b'Server does not support this operation' in r.data: r 
= request_patroni(member, 'post', 'failover', failover_value) if r.status in (200, 202): logging.debug(r) cluster = dcs.get_cluster() logging.debug(cluster) click.echo('{0} {1}'.format(timestamp(), r.data.decode('utf-8'))) else: click.echo('{0} failed, details: {1}, {2}'.format(action.title(), r.status, r.data.decode('utf-8'))) return except Exception: logging.exception(r) logging.warning('Failing over to DCS') click.echo('{0} Could not {1} using Patroni api, falling back to DCS'.format(timestamp(), action)) dcs.manual_failover(switchover_leader, candidate, scheduled_at=scheduled_at) output_members(obj, cluster, cluster_name, group=group) @ctl.command('failover', help='Failover to a replica') @arg_cluster_name @option_citus_group @click.option('--leader', '--primary', '--master', 'leader', help='The name of the current leader', default=None) @click.option('--candidate', help='The name of the candidate', default=None) @option_force @click.pass_obj def failover(obj: Dict[str, Any], cluster_name: str, group: Optional[int], leader: Optional[str], candidate: Optional[str], force: bool) -> None: """Process ``failover`` command of ``patronictl`` utility. Perform a failover operation immediately in the cluster. .. note:: If *leader* is given perform a switchover instead of a failover. This behavior is deprecated. ``--leader`` option support will be removed in the next major release. .. seealso:: Refer to :func:`_do_failover_or_switchover` for details. :param obj: Patroni configuration. :param cluster_name: name of the Patroni cluster. :param group: filter Citus group within we should perform a failover or switchover. If ``None``, user will be prompted for filling it -- unless *force* is ``True``, in which case an exception is raised by :func:`_do_failover_or_switchover`. :param leader: name of the current leader member. :param candidate: name of a standby member to be promoted. Nodes that are tagged with ``nofailover`` cannot be used. 
:param force: perform the failover or switchover without asking for confirmations. """ action = 'failover' if leader: action = 'switchover' click.echo(click.style( 'Supplying a leader name using this command is deprecated and will be removed in a future version of' ' Patroni, change your scripts to use `switchover` instead.\nExecuting switchover!', fg='red')) _do_failover_or_switchover(obj, action, cluster_name, group, leader, candidate, force) @ctl.command('switchover', help='Switchover to a replica') @arg_cluster_name @option_citus_group @click.option('--leader', '--primary', '--master', 'leader', help='The name of the current leader', default=None) @click.option('--candidate', help='The name of the candidate', default=None) @click.option('--scheduled', help='Timestamp of a scheduled switchover in unambiguous format (e.g. ISO 8601)', default=None) @option_force @click.pass_obj def switchover(obj: Dict[str, Any], cluster_name: str, group: Optional[int], leader: Optional[str], candidate: Optional[str], force: bool, scheduled: Optional[str]) -> None: """Process ``switchover`` command of ``patronictl`` utility. Perform a switchover operation in the cluster. .. seealso:: Refer to :func:`_do_failover_or_switchover` for details. :param obj: Patroni configuration. :param cluster_name: name of the Patroni cluster. :param group: filter Citus group within we should perform a switchover. If ``None``, user will be prompted for filling it -- unless *force* is ``True``, in which case an exception is raised by :func:`_do_failover_or_switchover`. :param leader: name of the current leader member. :param candidate: name of a standby member to be promoted. Nodes that are tagged with ``nofailover`` cannot be used. :param force: perform the switchover without asking for confirmations. :param scheduled: timestamp when the switchover should be scheduled to occur. If ``now`` perform immediately. 
""" _do_failover_or_switchover(obj, 'switchover', cluster_name, group, leader, candidate, force, scheduled) def generate_topology(level: int, member: Dict[str, Any], topology: Dict[Optional[str], List[Dict[str, Any]]]) -> Iterator[Dict[str, Any]]: """Recursively yield members with their names adjusted according to their *level* in the cluster topology. .. note:: The idea is to get a tree view of the members when printing their names. For example, suppose you have a cascading replication composed of 3 nodes, say ``postgresql0``, ``postgresql1``, and ``postgresql2``. This function would adjust their names to be like this: * ``'postgresql0'`` -> ``'postgresql0'`` * ``'postgresql1'`` -> ``'+ postgresql1'`` * ``'postgresql2'`` -> ``' + postgresql2'`` So, if you ever print their names line by line, you would see something like this: .. code-block:: postgresql0 + postgresql1 + postgresql2 :param level: the current level being inspected in the *topology*. :param member: information about the current member being inspected in *level* of *topology*. Should countain at least this key: * ``name``: name of the node, according to ``name`` configuration; But may contain others, which although ignored by this function, will be yielded as part of the resulting object. The value of key ``name`` is changed as explained in the note. :param topology: each key is the name of a node which has at least one replica attached to it. The corresponding value is a list of the attached replicas, each of them with the same structure described for *member*. :yields: the current member with its name changed. Besides that reyield values from recursive calls. 
""" members = topology.get(member['name'], []) if level > 0: member['name'] = '{0}+ {1}'.format((' ' * (level - 1) * 2), member['name']) if member['name']: yield member for member in members: yield from generate_topology(level + 1, member, topology) def topology_sort(members: List[Dict[str, Any]]) -> Iterator[Dict[str, Any]]: """Sort *members* according to their level in the replication topology tree. :param members: list of members in the cluster. Each item should countain at least these keys: * ``name``: name of the node, according to ``name`` configuration; * ``role``: ``leader``, ``standby_leader`` or ``replica``. Cascading replicas are identified through ``tags`` -> ``replicatefrom`` value -- if that is set, and they are in fact attached to another replica. Besides ``name``, ``role`` and ``tags`` keys, it may contain other keys, which although ignored by this function, will be yielded as part of the resulting object. The value of key ``name`` is changed through :func:`generate_topology`. :yields: *members* sorted by level in the topology, and with a new ``name`` value according to their level in the topology. """ topology: Dict[Optional[str], List[Dict[str, Any]]] = defaultdict(list) leader = next((m for m in members if m['role'].endswith('leader')), {'name': None}) replicas = set(member['name'] for member in members if not member['role'].endswith('leader')) for member in members: if not member['role'].endswith('leader'): parent = member.get('tags', {}).get('replicatefrom') parent = parent if parent and parent != member['name'] and parent in replicas else leader['name'] topology[parent].append(member) for member in generate_topology(0, leader, topology): yield member def get_cluster_service_info(cluster: Dict[str, Any]) -> List[str]: """Get complementary information about the cluster. :param cluster: a Patroni cluster represented as an object created through :func:`~patroni.utils.cluster_as_json`. :returns: a list of 0 or more informational messages. 
They can be about: * Cluster in maintenance mode; * Scheduled switchovers. """ service_info: List[str] = [] if cluster.get('pause'): service_info.append('Maintenance mode: on') if 'scheduled_switchover' in cluster: info = 'Switchover scheduled at: ' + cluster['scheduled_switchover']['at'] for name in ('from', 'to'): if name in cluster['scheduled_switchover']: info += '\n{0:>24}: {1}'.format(name, cluster['scheduled_switchover'][name]) service_info.append(info) return service_info def output_members(obj: Dict[str, Any], cluster: Cluster, name: str, extended: bool = False, fmt: str = 'pretty', group: Optional[int] = None) -> None: """Print information about the Patroni cluster and its members. Information is printed to console through :func:`print_output`, and contains: * ``Cluster``: name of the Patroni cluster, as per ``scope`` configuration; * ``Member``: name of the Patroni node, as per ``name`` configuration; * ``Host``: hostname (or IP) and port, as per ``postgresql.listen`` configuration; * ``Role``: ``Leader``, ``Standby Leader``, ``Sync Standby`` or ``Replica``; * ``State``: ``stopping``, ``stopped``, ``stop failed``, ``crashed``, ``running``, ``starting``, ``start failed``, ``restarting``, ``restart failed``, ``initializing new cluster``, ``initdb failed``, ``running custom bootstrap script``, ``custom bootstrap failed``, ``creating replica``, ``streaming``, ``in archive recovery``, and so on; * ``TL``: current timeline in Postgres; ``Lag in MB``: replication lag. Besides that it may also have: * ``Group``: Citus group ID -- showed only if Citus is enabled. * ``Pending restart``: if the node is pending a restart -- showed only if *extended*; * ``Scheduled restart``: timestamp for scheduled restart, if any -- showed only if *extended*; * ``Tags``: node tags, if any -- showed only if *extended*. The 3 extended columns are always included if *extended*, even if the member has no value for a given column. 
If not *extended*, these columns may still be shown if any of the members has any information for them. :param obj: Patroni configuration. :param cluster: Patroni cluster. :param name: name of the Patroni cluster. :param extended: if extended information (pending restarts, scheduled restarts, node tags) should be printed, if available. :param fmt: the output table printing format. See :func:`print_output` for available options. If *fmt* is neither ``topology`` nor ``pretty``, then complementary information gathered through :func:`get_cluster_service_info` is not printed. :param group: filter which Citus group we should get members from. If ``None`` get from all groups. """ rows: List[List[Any]] = [] logging.debug(cluster) initialize = {None: 'uninitialized', '': 'initializing'}.get(cluster.initialize, cluster.initialize) columns = ['Cluster', 'Member', 'Host', 'Role', 'State', 'TL', 'Lag in MB'] clusters = {group or 0: cluster_as_json(cluster)} is_citus_cluster = obj.get('citus') if is_citus_cluster: columns.insert(1, 'Group') if group is None: clusters.update({g: cluster_as_json(c) for g, c in cluster.workers.items()}) all_members = [m for c in clusters.values() for m in c['members'] if 'host' in m] for c in ('Pending restart', 'Scheduled restart', 'Tags'): if extended or any(m.get(c.lower().replace(' ', '_')) for m in all_members): columns.append(c) # Show Host as 'host:port' if somebody is running on non-standard port or two nodes are running on the same host append_port = any('port' in m and m['port'] != 5432 for m in all_members) or\ len(set(m['host'] for m in all_members)) < len(all_members) sort = topology_sort if fmt == 'topology' else iter for g, c in sorted(clusters.items()): for member in sort(c['members']): logging.debug(member) lag = member.get('lag', '') member.update(cluster=name, member=member['name'], group=g, host=member.get('host', ''), tl=member.get('timeline', ''), role=member['role'].replace('_', ' ').title(), lag_in_mb=round(lag / 1024 / 
1024) if isinstance(lag, int) else lag, pending_restart='*' if member.get('pending_restart') else '') if append_port and member['host'] and member.get('port'): member['host'] = ':'.join([member['host'], str(member['port'])]) if 'scheduled_restart' in member: value = member['scheduled_restart']['schedule'] if 'postgres_version' in member['scheduled_restart']: value += ' if version < {0}'.format(member['scheduled_restart']['postgres_version']) member['scheduled_restart'] = value rows.append([member.get(n.lower().replace(' ', '_'), '') for n in columns]) title = 'Citus cluster' if is_citus_cluster else 'Cluster' title_details = f' ({initialize})' if is_citus_cluster: title_details = '' if group is None else f' (group: {group}, {initialize})' title = f' {title}: {name}{title_details} ' print_output(columns, rows, {'Group': 'r', 'Lag in MB': 'r', 'TL': 'r'}, fmt, title) if fmt not in ('pretty', 'topology'): # Omit service info when using machine-readable formats return for g, c in sorted(clusters.items()): service_info = get_cluster_service_info(c) if service_info: if is_citus_cluster and group is None: click.echo('Citus group: {0}'.format(g)) click.echo(' ' + '\n '.join(service_info)) @ctl.command('list', help='List the Patroni members for a given Patroni') @click.argument('cluster_names', nargs=-1) @option_citus_group @click.option('--extended', '-e', help='Show some extra information', is_flag=True) @click.option('--timestamp', '-t', 'ts', help='Print timestamp', is_flag=True) @option_format @option_watch @option_watchrefresh @click.pass_obj def members(obj: Dict[str, Any], cluster_names: List[str], group: Optional[int], fmt: str, watch: Optional[int], w: bool, extended: bool, ts: bool) -> None: """Process ``list`` command of ``patronictl`` utility. Print information about the Patroni cluster through :func:`output_members`. :param obj: Patroni configuration. :param cluster_names: name of clusters that should be printed. 
If ``None`` consider only the cluster present in ``scope`` key of *obj*. :param group: filter which Citus group we should get members from. Refer to the module note for more details. :param fmt: the output table printing format. See :func:`print_output` for available options. :param watch: if given print output every *watch* seconds. :param w: if ``True`` print output every 2 seconds. :param extended: if extended information should be printed. See ``extended`` argument of :func:`output_members` for more details. :param ts: if timestamp should be included in the output. """ if not cluster_names: if 'scope' in obj: cluster_names = [obj['scope']] if not cluster_names: return logging.warning('Listing members: No cluster names were provided') for _ in watching(w, watch): if ts: click.echo(timestamp(0)) for cluster_name in cluster_names: dcs = get_dcs(obj, cluster_name, group) cluster = dcs.get_cluster() output_members(obj, cluster, cluster_name, extended, fmt, group) @ctl.command('topology', help='Prints ASCII topology for given cluster') @click.argument('cluster_names', nargs=-1) @option_citus_group @option_watch @option_watchrefresh @click.pass_context def topology(ctx: click.Context, cluster_names: List[str], group: Optional[int], watch: Optional[int], w: bool) -> None: """Process ``topology`` command of ``patronictl`` utility. Print information about the cluster in ``topology`` format through :func:`members`. :param ctx: click context to be passed to :func:`members`. :param cluster_names: name of clusters that should be printed. See ``cluster_names`` argument of :func:`output_members` for more details. :param group: filter which Citus group we should get members from. See ``group`` argument of :func:`output_members` for more details. :param watch: if given print output every *watch* seconds. :param w: if ``True`` print output every 2 seconds. 
""" ctx.forward(members, fmt='topology') def timestamp(precision: int = 6) -> str: """Get current timestamp with given *precision* as a string. :param precision: Amount of digits to be present in the precision. :returns: the current timestamp with given *precision*. """ return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:precision - 7] @ctl.command('flush', help='Discard scheduled events') @click.argument('cluster_name') @option_citus_group @click.argument('member_names', nargs=-1) @click.argument('target', type=click.Choice(['restart', 'switchover'])) @click.option('--role', '-r', help='Flush only members with this role', type=role_choice, default='any') @option_force @click.pass_obj def flush(obj: Dict[str, Any], cluster_name: str, group: Optional[int], member_names: List[str], force: bool, role: str, target: str) -> None: """Process ``flush`` command of ``patronictl`` utility. Discard scheduled restart or switchover events. :param obj: Patroni configuration. :param cluster_name: name of the Patroni cluster. :param group: filter which Citus group we should flush an event. Refer to the module note for more details. :param member_names: name of the members which events should be flushed. :param force: perform the operation without asking for confirmations. :param role: role to filter members. See :func:`get_all_members` for available options. :param target: the event that should be flushed -- ``restart`` or ``switchover``. 
""" dcs = get_dcs(obj, cluster_name, group) cluster = dcs.get_cluster() if target == 'restart': for member in get_members(obj, cluster, cluster_name, member_names, role, force, 'flush', group=group): if member.data.get('scheduled_restart'): r = request_patroni(member, 'delete', 'restart') check_response(r, member.name, 'flush scheduled restart') else: click.echo('No scheduled restart for member {0}'.format(member.name)) elif target == 'switchover': failover = cluster.failover if not failover or not failover.scheduled_at: return click.echo('No pending scheduled switchover') for member in get_all_members_leader_first(cluster): try: r = request_patroni(member, 'delete', 'switchover') if r.status in (200, 404): prefix = 'Success' if r.status == 200 else 'Failed' return click.echo('{0}: {1}'.format(prefix, r.data.decode('utf-8'))) click.echo('Failed: member={0}, status_code={1}, ({2})'.format( member.name, r.status, r.data.decode('utf-8'))) except Exception as err: logging.warning(str(err)) logging.warning('Member %s is not accessible', member.name) logging.warning('Failing over to DCS') click.echo('{0} Could not find any accessible member of cluster {1}'.format(timestamp(), cluster_name)) dcs.manual_failover('', '', version=failover.version) def wait_until_pause_is_applied(dcs: AbstractDCS, paused: bool, old_cluster: Cluster) -> None: """Wait for all members in the cluster to have ``pause`` state set to *paused*. :param dcs: DCS object from where to get fresh cluster information. :param paused: the desired state for ``pause`` in all nodes. :param old_cluster: original cluster information before pause or unpause has been requested. Used to report which nodes are still pending to have ``pause`` equal *paused* at a given point in time. 
""" config = get_global_config(old_cluster) click.echo("'{0}' request sent, waiting until it is recognized by all nodes".format(paused and 'pause' or 'resume')) old = {m.name: m.version for m in old_cluster.members if m.api_url} loop_wait = config.get('loop_wait') or dcs.loop_wait cluster = None for _ in polling_loop(loop_wait + 1): cluster = dcs.get_cluster() if all(m.data.get('pause', False) == paused for m in cluster.members if m.name in old): break else: if TYPE_CHECKING: # pragma: no cover assert cluster is not None remaining = [m.name for m in cluster.members if m.data.get('pause', False) != paused and m.name in old and old[m.name] != m.version] if remaining: return click.echo("{0} members didn't recognized pause state after {1} seconds" .format(', '.join(remaining), loop_wait)) return click.echo('Success: cluster management is {0}'.format(paused and 'paused' or 'resumed')) def toggle_pause(config: Dict[str, Any], cluster_name: str, group: Optional[int], paused: bool, wait: bool) -> None: """Toggle the ``pause`` state in the cluster members. :param config: Patroni configuration. :param cluster_name: name of the Patroni cluster. :param group: filter which Citus group we should toggle the pause state of. Refer to the module note for more details. :param paused: the desired state for ``pause`` in all nodes. :param wait: ``True`` if it should block until the operation is finished or ``false`` for returning immediately. :raises: PatroniCtlException: if * ``pause`` state is already *paused*; or * cluster contains no accessible members. 
""" dcs = get_dcs(config, cluster_name, group) cluster = dcs.get_cluster() if get_global_config(cluster).is_paused == paused: raise PatroniCtlException('Cluster is {0} paused'.format(paused and 'already' or 'not')) for member in get_all_members_leader_first(cluster): try: r = request_patroni(member, 'patch', 'config', {'pause': paused or None}) except Exception as err: logging.warning(str(err)) logging.warning('Member %s is not accessible', member.name) continue if r.status == 200: if wait: wait_until_pause_is_applied(dcs, paused, cluster) else: click.echo('Success: cluster management is {0}'.format(paused and 'paused' or 'resumed')) else: click.echo('Failed: {0} cluster management status code={1}, ({2})'.format( paused and 'pause' or 'resume', r.status, r.data.decode('utf-8'))) break else: raise PatroniCtlException('Can not find accessible cluster member') @ctl.command('pause', help='Disable auto failover') @arg_cluster_name @option_default_citus_group @click.pass_obj @click.option('--wait', help='Wait until pause is applied on all nodes', is_flag=True) def pause(obj: Dict[str, Any], cluster_name: str, group: Optional[int], wait: bool) -> None: """Process ``pause`` command of ``patronictl`` utility. Put the cluster in maintenance mode. :param obj: Patroni configuration. :param cluster_name: name of the Patroni cluster. :param group: filter which Citus group we should pause. Refer to the module note for more details. :param wait: ``True`` if it should block until the operation is finished or ``false`` for returning immediately. """ return toggle_pause(obj, cluster_name, group, True, wait) @ctl.command('resume', help='Resume auto failover') @arg_cluster_name @option_default_citus_group @click.option('--wait', help='Wait until pause is cleared on all nodes', is_flag=True) @click.pass_obj def resume(obj: Dict[str, Any], cluster_name: str, group: Optional[int], wait: bool) -> None: """Process ``unpause`` command of ``patronictl`` utility. 
Put the cluster out of maintenance mode. :param obj: Patroni configuration. :param cluster_name: name of the Patroni cluster. :param group: filter which Citus group we should unpause. Refer to the module note for more details. :param wait: ``True`` if it should block until the operation is finished or ``false`` for returning immediately. """ return toggle_pause(obj, cluster_name, group, False, wait) @contextmanager def temporary_file(contents: bytes, suffix: str = '', prefix: str = 'tmp') -> Iterator[str]: """Create a temporary file with specified contents that persists for the context. :param contents: binary string that will be written to the file. :param prefix: will be prefixed to the filename. :param suffix: will be appended to the filename. :yields: path of the created file. """ tmp = tempfile.NamedTemporaryFile(suffix=suffix, prefix=prefix, delete=False) with tmp: tmp.write(contents) try: yield tmp.name finally: os.unlink(tmp.name) def show_diff(before_editing: str, after_editing: str) -> None: """Show a diff between two strings. Inputs are expected to be unicode strings. If the output is to a tty the diff will be colored. .. note:: If tty it requires a pager program, and uses first found among: * Program given by ``PAGER`` environment variable; or * ``less``; or * ``more``. :param before_editing: string to be compared with *after_editing*. :param after_editing: string to be compared with *before_editing*. :raises: :class:`PatroniCtlException`: if no suitable pager can be found when printing diff output to a tty. 
""" def listify(string: str) -> List[str]: return [line + '\n' for line in string.rstrip('\n').split('\n')] unified_diff = difflib.unified_diff(listify(before_editing), listify(after_editing)) if sys.stdout.isatty(): buf = io.StringIO() for line in unified_diff: buf.write(str(line)) buf.seek(0) class opts: side_by_side = False width = 80 tab_width = 8 wrap = True pager = next( ( os.path.basename(p) for p in (os.environ.get('PAGER'), "less", "more") if p is not None and bool(shutil.which(p)) ), None, ) pager_options = None if opts.pager is None: raise PatroniCtlException( 'No pager could be found. Either set PAGER environment variable with ' 'your pager or install either "less" or "more" in the host.' ) # if we end up selecting "less" as "pager" then we set "pager" attribute # to "None". "less" is the default pager for "ydiff" module, and that # module adds some command-line options to "less" when "pager" is "None" if opts.pager == 'less': opts.pager = None markup_to_pager(PatchStream(buf), opts) else: for line in unified_diff: click.echo(line.rstrip('\n')) def format_config_for_editing(data: Any, default_flow_style: bool = False) -> str: """Format configuration as YAML for human consumption. :param data: configuration as nested dictionaries. :param default_flow_style: passed down as ``default_flow_style`` argument of :func:`yaml.safe_dump`. :returns: unicode YAML of the configuration. """ return yaml.safe_dump(data, default_flow_style=default_flow_style, encoding=None, allow_unicode=True, width=200) def apply_config_changes(before_editing: str, data: Dict[str, Any], kvpairs: List[str]) -> Tuple[str, Dict[str, Any]]: """Apply config changes specified as a list of key-value pairs. Keys are interpreted as dotted paths into the configuration data structure. Except for paths beginning with ``postgresql.parameters`` where rest of the path is used directly to allow for PostgreSQL GUCs containing dots. Values are interpreted as YAML values. 
:param before_editing: human representation before editing. :param data: configuration data structure. :param kvpairs: list of strings containing key value pairs separated by ``=``. :returns: tuple of human-readable, parsed data structure after changes. :raises: :class:`PatroniCtlException`: if any entry in *kvpairs* is ``None`` or not in the expected format. """ changed_data = copy.deepcopy(data) def set_path_value(config: Dict[str, Any], path: List[str], value: Any, prefix: Tuple[str, ...] = ()) -> None: """Recursively walk through *config* and update setting specified by *path* with *value*. :param config: configuration data structure with all settings found under *prefix* path. :param path: dotted path split by dot as delimiter into a list. Used to control the recursive calls and identify when a leaf node is reached. :param value: value for configuration described by *path*. If ``None`` the configuration key is removed from *config*. :param prefix: previous parts of *path* that have already been opened by parent recursive calls. Used to know if we are changing a Postgres related setting or not. *prefix* plus *path* compose the original *path* given on the root call. """ # Postgresql GUCs can't be nested, but can contain dots so we re-flatten the structure for this case if prefix == ('postgresql', 'parameters'): path = ['.'.join(path)] key = path[0] # When *path* contains a single item it means we reached a leaf node in the configuration, so we can remove or # update the configuration based on what has been requested by the user. if len(path) == 1: if value is None: config.pop(key, None) else: config[key] = value # Otherwise we need to keep navigating down in the configuration structure. 
else: if not isinstance(config.get(key), dict): config[key] = {} set_path_value(config[key], path[1:], value, prefix + (key,)) if config[key] == {}: del config[key] for pair in kvpairs: if not pair or "=" not in pair: raise PatroniCtlException("Invalid parameter setting {0}".format(pair)) key_path, value = pair.split("=", 1) set_path_value(changed_data, key_path.strip().split("."), yaml.safe_load(value)) return format_config_for_editing(changed_data), changed_data def apply_yaml_file(data: Dict[str, Any], filename: str) -> Tuple[str, Dict[str, Any]]: """Apply changes from a YAML file to configuration. :param data: configuration data structure. :param filename: name of the YAML file, ``-`` is taken to mean standard input. :returns: tuple of human-readable and parsed data structure after changes. """ changed_data = copy.deepcopy(data) if filename == '-': new_options = yaml.safe_load(sys.stdin) else: with open(filename) as fd: new_options = yaml.safe_load(fd) patch_config(changed_data, new_options) return format_config_for_editing(changed_data), changed_data def invoke_editor(before_editing: str, cluster_name: str) -> Tuple[str, Dict[str, Any]]: """Start editor command to edit configuration in human readable format. .. note:: Requires an editor program, and uses first found among: * Program given by ``EDITOR`` environemnt variable; or * ``editor``; or * ``vi``. :param before_editing: human representation before editing. :param cluster_name: name of the Patroni cluster. :returns: tuple of human-readable, parsed data structure after changes. :raises: :class:`PatroniCtlException`: if * No suitable editor can be found; or * Editor call exits with unexpected return code. """ editor_cmd = os.environ.get('EDITOR') if not editor_cmd: for editor in ('editor', 'vi'): editor_cmd = shutil.which(editor) if editor_cmd: logging.debug('Setting fallback editor_cmd=%s', editor) break if not editor_cmd: raise PatroniCtlException('EDITOR environment variable is not set. 
editor or vi are not available') with temporary_file(contents=before_editing.encode('utf-8'), suffix='.yaml', prefix='{0}-config-'.format(cluster_name)) as tmpfile: ret = subprocess.call([editor_cmd, tmpfile]) if ret: raise PatroniCtlException("Editor exited with return code {0}".format(ret)) with codecs.open(tmpfile, encoding='utf-8') as fd: after_editing = fd.read() return after_editing, yaml.safe_load(after_editing) @ctl.command('edit-config', help="Edit cluster configuration") @arg_cluster_name @option_default_citus_group @click.option('--quiet', '-q', is_flag=True, help='Do not show changes') @click.option('--set', '-s', 'kvpairs', multiple=True, help='Set specific configuration value. Can be specified multiple times') @click.option('--pg', '-p', 'pgkvpairs', multiple=True, help='Set specific PostgreSQL parameter value. Shorthand for -s postgresql.parameters. ' 'Can be specified multiple times') @click.option('--apply', 'apply_filename', help='Apply configuration from file. Use - for stdin.') @click.option('--replace', 'replace_filename', help='Apply configuration from file, replacing existing configuration.' ' Use - for stdin.') @option_force @click.pass_obj def edit_config(obj: Dict[str, Any], cluster_name: str, group: Optional[int], force: bool, quiet: bool, kvpairs: List[str], pgkvpairs: List[str], apply_filename: Optional[str], replace_filename: Optional[str]) -> None: """Process ``edit-config`` command of ``patronictl`` utility. Update or replace Patroni configuration in the DCS. :param obj: Patroni configuration. :param cluster_name: name of the Patroni cluster. :param group: filter which Citus group configuration we should edit. Refer to the module note for more details. :param force: if ``True`` apply config changes without asking for confirmations. :param quiet: if ``True`` skip showing config diff in the console. :param kvpairs: list of key value general parameters to be changed. :param pgkvpairs: list of key value Postgres parameters to be changed. 
:param apply_filename: name of the file which contains with new configuration parameters to be applied. Pass ``-`` for using stdin instead. :param replace_filename: name of the file which contains the new configuration parameters to replace the existing configuration. Pass ``-`` for using stdin instead. :raises: :class:`PatroniCtlException`: if * Configuration is absent from DCS; or * Detected a concurrent modification of the configuration in the DCS. """ dcs = get_dcs(obj, cluster_name, group) cluster = dcs.get_cluster() if not cluster.config: raise PatroniCtlException('The config key does not exist in the cluster {0}'.format(cluster_name)) before_editing = format_config_for_editing(cluster.config.data) after_editing = None # Serves as a flag if any changes were requested changed_data = cluster.config.data if replace_filename: after_editing, changed_data = apply_yaml_file({}, replace_filename) if apply_filename: after_editing, changed_data = apply_yaml_file(changed_data, apply_filename) if kvpairs or pgkvpairs: all_pairs = list(kvpairs) + ['postgresql.parameters.' 
+ v.lstrip() for v in pgkvpairs] after_editing, changed_data = apply_config_changes(before_editing, changed_data, all_pairs) # If no changes were specified on the command line invoke editor if after_editing is None: after_editing, changed_data = invoke_editor(before_editing, cluster_name) if cluster.config.data == changed_data: if not quiet: click.echo("Not changed") return if not quiet: show_diff(before_editing, after_editing) if (apply_filename == '-' or replace_filename == '-') and not force: click.echo("Use --force option to apply changes") return if force or click.confirm('Apply these changes?'): if not dcs.set_config_value(json.dumps(changed_data), cluster.config.version): raise PatroniCtlException("Config modification aborted due to concurrent changes") click.echo("Configuration changed") @ctl.command('show-config', help="Show cluster configuration") @arg_cluster_name @option_default_citus_group @click.pass_obj def show_config(obj: Dict[str, Any], cluster_name: str, group: Optional[int]) -> None: """Process ``show-config`` command of ``patronictl`` utility. Show Patroni configuration stored in the DCS. :param obj: Patroni configuration. :param cluster_name: name of the Patroni cluster. :param group: filter which Citus group configuration we should show. Refer to the module note for more details. """ cluster = get_dcs(obj, cluster_name, group).get_cluster() if cluster.config: click.echo(format_config_for_editing(cluster.config.data)) @ctl.command('version', help='Output version of patronictl command or a running Patroni instance') @click.argument('cluster_name', required=False) @click.argument('member_names', nargs=-1) @option_citus_group @click.pass_obj def version(obj: Dict[str, Any], cluster_name: str, group: Optional[int], member_names: List[str]) -> None: """Process ``version`` command of ``patronictl`` utility. Show version of: * ``patronictl`` on invoker; * ``patroni`` on all members of the cluster; * ``PostgreSQL`` on all members of the cluster. 
:param obj: Patroni configuration. :param cluster_name: name of the Patroni cluster. :param group: filter which Citus group we should get members from. Refer to the module note for more details. :param member_names: filter which members we should get version information from. """ click.echo("patronictl version {0}".format(__version__)) if not cluster_name: return click.echo("") cluster = get_dcs(obj, cluster_name, group).get_cluster() for m in get_all_members(obj, cluster, group, 'any'): if m.api_url: if not member_names or m.name in member_names: try: response = request_patroni(m) data = json.loads(response.data.decode('utf-8')) version = data.get('patroni', {}).get('version') pg_version = data.get('server_version') pg_version_str = " PostgreSQL {0}".format(format_pg_version(pg_version)) if pg_version else "" click.echo("{0}: Patroni {1}{2}".format(m.name, version, pg_version_str)) except Exception as e: click.echo("{0}: failed to get version: {1}".format(m.name, e)) @ctl.command('history', help="Show the history of failovers/switchovers") @arg_cluster_name @option_default_citus_group @option_format @click.pass_obj def history(obj: Dict[str, Any], cluster_name: str, group: Optional[int], fmt: str) -> None: """Process ``history`` command of ``patronictl`` utility. Show the history of failover/switchover events in the cluster. Information is printed to console through :func:`print_output`, and contains: * ``TL``: Postgres timeline when the event occurred; * ``LSN``: Postgres LSN, in bytes, when the event occurred; * ``Reason``: the reason that motivated the event, if any; * ``Timestamp``: timestamp when the event occurred; * ``New Leader``: the Postgres node that was promoted during the event. :param obj: Patroni configuration. :param cluster_name: name of the Patroni cluster. :param group: filter which Citus group we should get events from. Refer to the module note for more details. :param fmt: the output table printing format. 
See :func:`print_output` for available options. """ cluster = get_dcs(obj, cluster_name, group).get_cluster() cluster_history = cluster.history.lines if cluster.history else [] history: List[List[Any]] = list(map(list, cluster_history)) table_header_row = ['TL', 'LSN', 'Reason', 'Timestamp', 'New Leader'] for line in history: if len(line) < len(table_header_row): add_column_num = len(table_header_row) - len(line) for _ in range(add_column_num): line.append('') print_output(table_header_row, history, {'TL': 'r', 'LSN': 'r'}, fmt) def format_pg_version(version: int) -> str: """Format Postgres version for human consumption. :param version: Postgres version represented as an integer. :returns: Postgres version represented as a human-readable string. :Example: >>> format_pg_version(90624) '9.6.24' >>> format_pg_version(100000) '10.0' >>> format_pg_version(140008) '14.8' """ if version < 100000: return "{0}.{1}.{2}".format(version // 10000, version // 100 % 100, version % 100) else: return "{0}.{1}".format(version // 10000, version % 100) patroni-3.2.2/patroni/daemon.py000066400000000000000000000132421455170150700164720ustar00rootroot00000000000000"""Daemon processes abstraction module. This module implements abstraction classes and functions for creating and managing daemon processes in Patroni. Currently it is only used for the main "Thread" of ``patroni`` and ``patroni_raft_controller`` commands. """ from __future__ import print_function import abc import argparse import os import signal import sys from threading import Lock from typing import Any, Optional, Type, TYPE_CHECKING if TYPE_CHECKING: # pragma: no cover from .config import Config def get_base_arg_parser() -> argparse.ArgumentParser: """Create a basic argument parser with the arguments used for both patroni and raft controller daemon. 
:returns: 'argparse.ArgumentParser' object """ from .config import Config from .version import __version__ parser = argparse.ArgumentParser() parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(__version__)) parser.add_argument('configfile', nargs='?', default='', help='Patroni may also read the configuration from the {0} environment variable' .format(Config.PATRONI_CONFIG_VARIABLE)) return parser class AbstractPatroniDaemon(abc.ABC): """A Patroni daemon process. .. note:: When inheriting from :class:`AbstractPatroniDaemon` you are expected to define the methods :func:`_run_cycle` to determine what it should do in each execution cycle, and :func:`_shutdown` to determine what it should do when shutting down. :ivar logger: log handler used by this daemon. :ivar config: configuration options for this daemon. """ def __init__(self, config: 'Config') -> None: """Set up signal handlers, logging handler and configuration. :param config: configuration options for this daemon. """ from patroni.log import PatroniLogger self.setup_signal_handlers() self.logger = PatroniLogger() self.config = config AbstractPatroniDaemon.reload_config(self, local=True) def sighup_handler(self, *_: Any) -> None: """Handle SIGHUP signals. Flag the daemon as "SIGHUP received". """ self._received_sighup = True def api_sigterm(self) -> bool: """Guarantee only a single SIGTERM is being processed. Flag the daemon as "SIGTERM received" with a lock-based approach. :returns: ``True`` if the daemon was flagged as "SIGTERM received". """ ret = False with self._sigterm_lock: if not self._received_sigterm: self._received_sigterm = True ret = True return ret def sigterm_handler(self, *_: Any) -> None: """Handle SIGTERM signals. Terminate the daemon process through :func:`api_sigterm`. """ if self.api_sigterm(): sys.exit() def setup_signal_handlers(self) -> None: """Set up daemon signal handlers. Set up SIGHUP and SIGTERM signal handlers. .. 
note:: SIGHUP is only handled in non-Windows environments. """ self._received_sighup = False self._sigterm_lock = Lock() self._received_sigterm = False if os.name != 'nt': signal.signal(signal.SIGHUP, self.sighup_handler) signal.signal(signal.SIGTERM, self.sigterm_handler) @property def received_sigterm(self) -> bool: """If daemon was signaled with SIGTERM.""" with self._sigterm_lock: return self._received_sigterm def reload_config(self, sighup: bool = False, local: Optional[bool] = False) -> None: """Reload configuration. :param sighup: if it is related to a SIGHUP signal. The sighup parameter could be used in the method overridden in a child class. :param local: will be ``True`` if there are changes in the local configuration file. """ if local: self.logger.reload_config(self.config.get('log', {})) @abc.abstractmethod def _run_cycle(self) -> None: """Define what the daemon should do in each execution cycle. Keep being called in the daemon's main loop until the daemon is eventually terminated. """ def run(self) -> None: """Run the daemon process. Start the logger thread and keep running execution cycles until a SIGTERM is eventually received. Also reload configuration uppon receiving SIGHUP. """ self.logger.start() while not self.received_sigterm: if self._received_sighup: self._received_sighup = False self.reload_config(True, self.config.reload_local_configuration()) self._run_cycle() @abc.abstractmethod def _shutdown(self) -> None: """Define what the daemon should do when shutting down.""" def shutdown(self) -> None: """Shut the daemon down when a SIGTERM is received. Shut down the daemon process and the logger thread. """ with self._sigterm_lock: self._received_sigterm = True self._shutdown() self.logger.shutdown() def abstract_main(cls: Type[AbstractPatroniDaemon], configfile: str) -> None: """Create the main entry point of a given daemon process. :param cls: a class that should inherit from :class:`AbstractPatroniDaemon`. 
:param configfile: """ from .config import Config, ConfigParseError try: config = Config(configfile) except ConfigParseError as e: sys.exit(e.value) controller = cls(config) try: controller.run() except KeyboardInterrupt: pass finally: controller.shutdown() patroni-3.2.2/patroni/dcs/000077500000000000000000000000001455170150700154245ustar00rootroot00000000000000patroni-3.2.2/patroni/dcs/__init__.py000066400000000000000000002472211455170150700175450ustar00rootroot00000000000000"""Abstract classes for Distributed Configuration Store.""" import abc import datetime import importlib import inspect import json import logging import os import pkgutil import re import sys import time from collections import defaultdict from copy import deepcopy from random import randint from threading import Event, Lock from types import ModuleType from typing import Any, Callable, Collection, Dict, List, NamedTuple, Optional, Set, Tuple, Union, TYPE_CHECKING, \ Type, Iterator from urllib.parse import urlparse, urlunparse, parse_qsl import dateutil.parser from ..exceptions import PatroniFatalException from ..utils import deep_compare, uri from ..tags import Tags from ..utils import parse_int if TYPE_CHECKING: # pragma: no cover from ..config import Config SLOT_ADVANCE_AVAILABLE_VERSION = 110000 CITUS_COORDINATOR_GROUP_ID = 0 citus_group_re = re.compile('^(0|[1-9][0-9]*)$') slot_name_re = re.compile('^[a-z0-9_]{1,63}$') logger = logging.getLogger(__name__) def slot_name_from_member_name(member_name: str) -> str: """Translate member name to valid PostgreSQL slot name. .. note:: PostgreSQL's replication slot names must be valid PostgreSQL names. This function maps the wider space of member names to valid PostgreSQL names. Names have their case lowered, dashes and periods common in hostnames are replaced with underscores, other characters are encoded as their unicode codepoint. Name is truncated to 64 characters. Multiple different member names may map to a single slot name. 
:param member_name: The string to convert to a slot name. :returns: The string converted using the rules described above. """ def replace_char(match: Any) -> str: c = match.group(0) return '_' if c in '-.' else f"u{ord(c):04d}" slot_name = re.sub('[^a-z0-9_]', replace_char, member_name.lower()) return slot_name[0:63] def parse_connection_string(value: str) -> Tuple[str, Union[str, None]]: """Split and rejoin a URL string into a connection URL and an API URL. .. note:: Original Governor stores connection strings for each cluster members in a following format: postgres://{username}:{password}@{connect_address}/postgres Since each of our patroni instances provides their own REST API endpoint, it's good to store this information in DCS along with PostgreSQL connection string. In order to not introduce new keys and be compatible with original Governor we decided to extend original connection string in a following way: postgres://{username}:{password}@{connect_address}/postgres?application_name={api_url} This way original Governor could use such connection string as it is, because of feature of ``libpq`` library. :param value: The URL string to split. :returns: the connection string stored in DCS split into two parts, ``conn_url`` and ``api_url``. """ scheme, netloc, path, params, query, fragment = urlparse(value) conn_url = urlunparse((scheme, netloc, path, params, '', fragment)) api_url = ([v for n, v in parse_qsl(query) if n == 'application_name'] or [None])[0] return conn_url, api_url def dcs_modules() -> List[str]: """Get names of DCS modules, depending on execution environment. .. note:: If being packaged with PyInstaller, modules aren't discoverable dynamically by scanning source directory because :class:`importlib.machinery.FrozenImporter` doesn't implement :func:`iter_modules`. But it is still possible to find all potential DCS modules by iterating through ``toc``, which contains list of all "frozen" resources. 
:returns: list of known module names with absolute python module path namespace, e.g. ``patroni.dcs.etcd``. """ dcs_dirname = os.path.dirname(__file__) module_prefix = __package__ + '.' if getattr(sys, 'frozen', False): toc: Set[str] = set() # dcs_dirname may contain a dot, which causes pkgutil.iter_importers() # to misinterpret the path as a package name. This can be avoided # altogether by not passing a path at all, because PyInstaller's # FrozenImporter is a singleton and registered as top-level finder. for importer in pkgutil.iter_importers(): if hasattr(importer, 'toc'): toc |= getattr(importer, 'toc') return [module for module in toc if module.startswith(module_prefix) and module.count('.') == 2] return [module_prefix + name for _, name, is_pkg in pkgutil.iter_modules([dcs_dirname]) if not is_pkg] def iter_dcs_classes( config: Optional[Union['Config', Dict[str, Any]]] = None ) -> Iterator[Tuple[str, Type['AbstractDCS']]]: """Attempt to import DCS modules that are present in the given configuration. .. note:: If a module successfully imports we can assume that all its requirements are installed. :param config: configuration information with possible DCS names as keys. If given, only attempt to import DCS modules defined in the configuration. Else, if ``None``, attempt to import any supported DCS module. :yields: a tuple containing the module ``name`` and the imported DCS class object. """ for mod_name in dcs_modules(): name = mod_name.rpartition('.')[2] if config is None or name in config: try: module = importlib.import_module(mod_name) dcs_module = find_dcs_class_in_module(module) if dcs_module: yield name, dcs_module except ImportError: logger.log(logging.DEBUG if config is not None else logging.INFO, 'Failed to import %s', mod_name) def find_dcs_class_in_module(module: ModuleType) -> Optional[Type['AbstractDCS']]: """Try to find the implementation of :class:`AbstractDCS` interface in *module* matching the *module* name. :param module: Imported DCS module. 
:returns: class with a name matching the name of *module* that implements :class:`AbstractDCS` or ``None`` if not found. """ module_name = module.__name__.rpartition('.')[2] return next( (obj for obj_name, obj in module.__dict__.items() if (obj_name.lower() == module_name and inspect.isclass(obj) and issubclass(obj, AbstractDCS))), None) def get_dcs(config: Union['Config', Dict[str, Any]]) -> 'AbstractDCS': """Attempt to load a Distributed Configuration Store from known available implementations. .. note:: Using the list of available DCS classes returned by :func:`iter_dcs_classes` attempt to dynamically instantiate the class that implements a DCS using the abstract class :class:`AbstractDCS`. Basic top-level configuration parameters retrieved from *config* are propagated to the DCS specific config before being passed to the module DCS class. If no module is found to satisfy configuration then report and log an error. This will cause Patroni to exit. :raises :exc:`PatroniFatalException`: if a load of all available DCS modules have been tried and none succeeded. :param config: object or dictionary with Patroni configuration. This is normally a representation of the main Patroni :returns: The first successfully loaded DCS module which is an implementation of :class:`AbstractDCS`. """ for name, dcs_class in iter_dcs_classes(config): # Propagate some parameters from top level of config if defined to the DCS specific config section. config[name].update({ p: config[p] for p in ('namespace', 'name', 'scope', 'loop_wait', 'patronictl', 'ttl', 'retry_timeout') if p in config}) # From citus section we only need "group" parameter, but will propagate everything just in case. 
if isinstance(config.get('citus'), dict): config[name].update(config['citus']) return dcs_class(config[name]) raise PatroniFatalException( f"Can not find suitable configuration of distributed configuration store\n" f"Available implementations: {', '.join(sorted([n for n, _ in iter_dcs_classes()]))}") _Version = Union[int, str] _Session = Union[int, float, str, None] class Member(Tags, NamedTuple('Member', [('version', _Version), ('name', str), ('session', _Session), ('data', Dict[str, Any])])): """Immutable object (namedtuple) which represents single member of PostgreSQL cluster. .. note:: We are using an old-style attribute declaration here because otherwise it is not possible to override ``__new__`` method in the :class:`RemoteMember` class. .. note:: These two keys in data are always written to the DCS, but care is taken to maintain consistency and resilience from data that is read: ``conn_url``: connection string containing host, user and password which could be used to access this member. ``api_url``: REST API url of patroni instance Consists of the following fields: :ivar version: modification version of a given member key in a Configuration Store. :ivar name: name of PostgreSQL cluster member. :ivar session: either session id or just ttl in seconds. :ivar data: dictionary containing arbitrary data i.e. ``conn_url``, ``api_url``, ``xlog_location``, ``state``, ``role``, ``tags``, etc... """ @staticmethod def from_node(version: _Version, name: str, session: _Session, value: str) -> 'Member': """Factory method for instantiating :class:`Member` from a JSON serialised string or object. :param version: modification version of a given member key in a Configuration Store. :param name: name of PostgreSQL cluster member. :param session: either session id or just ttl in seconds. :param value: JSON encoded string containing arbitrary data i.e. ``conn_url``, ``api_url``, ``xlog_location``, ``state``, ``role``, ``tags``, etc. 
OR a connection URL starting with ``postgres://``. :returns: an :class:`Member` instance built with the given arguments. :Example: >>> Member.from_node(-1, '', '', '{"conn_url": "postgres://foo@bar/postgres"}') is not None True >>> Member.from_node(-1, '', '', '{') Member(version=-1, name='', session='', data={}) """ if value.startswith('postgres'): conn_url, api_url = parse_connection_string(value) data = {'conn_url': conn_url, 'api_url': api_url} else: try: data = json.loads(value) assert isinstance(data, dict) except (AssertionError, TypeError, ValueError): data: Dict[str, Any] = {} return Member(version, name, session, data) @property def conn_url(self) -> Optional[str]: """The ``conn_url`` value from :attr:`~Member.data` if defined or constructed from ``conn_kwargs``.""" conn_url = self.data.get('conn_url') if conn_url: return conn_url conn_kwargs = self.data.get('conn_kwargs') if conn_kwargs: conn_url = uri('postgresql', (conn_kwargs.get('host'), conn_kwargs.get('port', 5432))) self.data['conn_url'] = conn_url return conn_url return None def conn_kwargs(self, auth: Union[Any, Dict[str, Any], None] = None) -> Dict[str, Any]: """Give keyword arguments used for PostgreSQL connection settings. :param auth: Authentication properties - can be defined as anything supported by the ``psycopg2`` or ``psycopg`` modules. Converts a key of ``username`` to ``user`` if supplied. :returns: A dictionary containing a merge of default parameter keys ``host``, ``port`` and ``dbname``, with the contents of :attr:`~Member.data` ``conn_kwargs`` key. If those are not defined will parse and reform connection parameters from :attr:`~Member.conn_url`. One of these two attributes needs to have data defined to construct the output dictionary. Finally, *auth* parameters are merged with the dictionary before returned. 
""" defaults = { "host": None, "port": None, "dbname": None } ret: Optional[Dict[str, Any]] = self.data.get('conn_kwargs') if ret: defaults.update(ret) ret = defaults else: conn_url = self.conn_url if not conn_url: return {} # due to the invalid conn_url we don't care about authentication parameters r = urlparse(conn_url) ret = { 'host': r.hostname, 'port': r.port or 5432, 'dbname': r.path[1:] } self.data['conn_kwargs'] = ret.copy() # apply any remaining authentication parameters if auth and isinstance(auth, dict): ret.update({k: v for k, v in auth.items() if v is not None}) if 'username' in auth: ret['user'] = ret.pop('username') return ret @property def api_url(self) -> Optional[str]: """The ``api_url`` value from :attr:`~Member.data` if defined.""" return self.data.get('api_url') @property def tags(self) -> Dict[str, Any]: """The ``tags`` value from :attr:`~Member.data` if defined, otherwise an empty dictionary.""" return self.data.get('tags', {}) @property def clonefrom(self) -> bool: """``True`` if both ``clonefrom`` tag is ``True`` and a connection URL is defined.""" return super().clonefrom and bool(self.conn_url) @property def state(self) -> str: """The ``state`` value of :attr:`~Member.data`.""" return self.data.get('state', 'unknown') @property def is_running(self) -> bool: """``True`` if the member :attr:`~Member.state` is ``running``.""" return self.state == 'running' @property def patroni_version(self) -> Optional[Tuple[int, ...]]: """The ``version`` string value from :attr:`~Member.data` converted to tuple. 
:Example: >>> Member.from_node(1, '', '', '{"version":"1.2.3"}').patroni_version (1, 2, 3) """ version = self.data.get('version') if version: try: return tuple(map(int, version.split('.'))) except Exception: logger.debug('Failed to parse Patroni version %s', version) return None @property def lsn(self) -> Optional[int]: """Current LSN (receive/flush/replay).""" return parse_int(self.data.get('xlog_location')) class RemoteMember(Member): """Represents a remote member (typically a primary) for a standby cluster. :cvar ALLOWED_KEYS: Controls access to relevant key names that could be in stored :attr:`~RemoteMember.data`. """ ALLOWED_KEYS: Tuple[str, ...] = ( 'primary_slot_name', 'create_replica_methods', 'restore_command', 'archive_cleanup_command', 'recovery_min_apply_delay', 'no_replication_slot' ) def __new__(cls, name: str, data: Dict[str, Any]) -> 'RemoteMember': """Factory method to construct instance from given *name* and *data*. :param name: name of the remote member. :param data: dictionary of member information, which can contain keys from :const:`~RemoteMember.ALLOWED_KEYS` but also member connection information ``api_url`` and ``conn_kwargs``, and slot information. :returns: constructed instance using supplied parameters. """ return super(RemoteMember, cls).__new__(cls, -1, name, None, data) def __getattr__(self, name: str) -> Any: """Dictionary style key lookup. :param name: key to lookup. :returns: value of *name* key in :attr:`~RemoteMember.data` if key *name* is in :cvar:`~RemoteMember.ALLOWED_KEYS`, else ``None``. """ return self.data.get(name) if name in RemoteMember.ALLOWED_KEYS else None class Leader(NamedTuple): """Immutable object (namedtuple) which represents leader key. 
Consists of the following fields: :ivar version: modification version of a leader key in a Configuration Store :ivar session: either session id or just ttl in seconds :ivar member: reference to a :class:`Member` object which represents current leader (see :attr:`Cluster.members`) """ version: _Version session: _Session member: Member @property def name(self) -> str: """The leader "member" name.""" return self.member.name def conn_kwargs(self, auth: Optional[Dict[str, str]] = None) -> Dict[str, str]: """Connection keyword arguments. :param auth: an optional dictionary containing authentication information. :returns: the result of the called :meth:`Member.conn_kwargs` method. """ return self.member.conn_kwargs(auth) @property def conn_url(self) -> Optional[str]: """Connection URL value of the :class:`Member` instance.""" return self.member.conn_url @property def data(self) -> Dict[str, Any]: """Data value of the :class:`Member` instance.""" return self.member.data @property def timeline(self) -> Optional[int]: """Timeline value of :attr:`~Member.data`.""" return self.data.get('timeline') @property def checkpoint_after_promote(self) -> Optional[bool]: """Determine whether a checkpoint has occurred for this leader after promotion. :returns: ``True`` if the role is ``master`` or ``primary`` and ``checkpoint_after_promote`` is not set, ``False`` if not a ``master`` or ``primary`` or if the checkpoint hasn't occurred. If the version of Patroni is older than 1.5.6, return ``None``. 
        :Example:

            >>> Leader(1, '', Member.from_node(1, '', '', '{"version":"z"}')).checkpoint_after_promote
        """
        version = self.member.patroni_version
        # 1.5.6 is the last version which doesn't expose checkpoint_after_promote: false
        if version and version > (1, 5, 6):
            return self.data.get('role') in ('master', 'primary') and 'checkpoint_after_promote' not in self.data
        return None


class Failover(NamedTuple):
    """Immutable object (namedtuple) representing configuration information required for failover/switchover capability.

    :ivar version: version of the object.
    :ivar leader: name of the leader. If value isn't empty we treat it as a switchover from the specified node.
    :ivar candidate: the name of the member node to be considered as a failover candidate.
    :ivar scheduled_at: in the case of a switchover the :class:`~datetime.datetime` object to perform the scheduled
        switchover.

    :Example:

        >>> 'Failover' in str(Failover.from_node(1, '{"leader": "cluster_leader"}'))
        True

        >>> 'Failover' in str(Failover.from_node(1, {"leader": "cluster_leader"}))
        True

        >>> 'Failover' in str(Failover.from_node(1, '{"leader": "cluster_leader", "member": "cluster_candidate"}'))
        True

        >>> Failover.from_node(1, 'null') is None
        False

        >>> n = '''{"leader": "cluster_leader", "member": "cluster_candidate",
        ...         "scheduled_at": "2016-01-14T10:09:57.1394Z"}'''

        >>> 'tzinfo=' in str(Failover.from_node(1, n))
        True

        >>> Failover.from_node(1, None) is None
        False

        >>> Failover.from_node(1, '{}') is None
        False

        >>> 'abc' in Failover.from_node(1, 'abc:def')
        True
    """

    version: _Version
    leader: Optional[str]
    candidate: Optional[str]
    scheduled_at: Optional[datetime.datetime]

    @staticmethod
    def from_node(version: _Version, value: Union[str, Dict[str, str]]) -> 'Failover':
        """Factory method to parse *value* as failover configuration.

        :param version: version number for the object.
        :param value: JSON serialized data or a dictionary of configuration.
            Can also be a colon ``:`` delimited list of leader, followed by candidate name (legacy format).
            If ``scheduled_at`` key is defined the value will be parsed by :func:`dateutil.parser.parse`.

        :returns: constructed :class:`Failover` information object
        """
        if isinstance(value, dict):
            data: Dict[str, Any] = value
        elif value:
            try:
                data = json.loads(value)
                assert isinstance(data, dict)
            except AssertionError:
                # valid JSON, but not an object -- treat as empty configuration
                data = {}
            except ValueError:
                # not JSON at all: fall back to the legacy "leader:candidate" colon-delimited format
                t = [a.strip() for a in value.split(':')]
                leader = t[0]
                candidate = t[1] if len(t) > 1 else None
                return Failover(version, leader, candidate, None)
        else:
            data = {}

        if data.get('scheduled_at'):
            data['scheduled_at'] = dateutil.parser.parse(data['scheduled_at'])

        return Failover(version, data.get('leader'), data.get('member'), data.get('scheduled_at'))

    def __len__(self) -> int:
        """Implement ``len`` function capability.

        .. note::
            This magic method aids in the evaluation of "emptiness" of a :class:`Failover` instance. For example:

            >>> failover = Failover.from_node(1, None)
            >>> len(failover)
            0

            >>> assert bool(failover) is False

            >>> failover = Failover.from_node(1, {"leader": "cluster_leader"})
            >>> len(failover)
            1

            >>> assert bool(failover) is True

            This makes it easier to write ``if cluster.failover`` rather than the longer statement.

        """
        return int(bool(self.leader)) + int(bool(self.candidate))


class ClusterConfig(NamedTuple):
    """Immutable object (namedtuple) which represents cluster configuration.

    :ivar version: version number for the object.
    :ivar data: dictionary of configuration information.
    :ivar modify_version: modified version number.
    """

    version: _Version
    data: Dict[str, Any]
    modify_version: _Version

    @staticmethod
    def from_node(version: _Version, value: str, modify_version: Optional[_Version] = None) -> 'ClusterConfig':
        """Factory method to parse *value* as configuration information.

        :param version: version number for object.
        :param value: raw JSON serialized data, if not parsable replaced with an empty dictionary.
        :param modify_version: optional modify version number, use *version* if not provided.

        :returns: constructed :class:`ClusterConfig` instance.

        :Example:

            >>> ClusterConfig.from_node(1, '{') is None
            False
        """
        try:
            data = json.loads(value)
            assert isinstance(data, dict)
        except (AssertionError, TypeError, ValueError):
            # unparsable payload: fall back to an empty config and reset modify_version
            data: Dict[str, Any] = {}
            modify_version = 0
        return ClusterConfig(version, data, version if modify_version is None else modify_version)

    @property
    def permanent_slots(self) -> Dict[str, Any]:
        """Dictionary of permanent slots information looked up from :attr:`~ClusterConfig.data`."""
        return (self.data.get('permanent_replication_slots')
                or self.data.get('permanent_slots')
                or self.data.get('slots')
                or {})

    @property
    def ignore_slots_matchers(self) -> List[Dict[str, Any]]:
        """The value for ``ignore_slots`` from :attr:`~ClusterConfig.data` if defined or an empty list."""
        return self.data.get('ignore_slots') or []

    @property
    def max_timelines_history(self) -> int:
        """The value for ``max_timelines_history`` from :attr:`~ClusterConfig.data` if defined or ``0``."""
        return self.data.get('max_timelines_history', 0)


class SyncState(NamedTuple):
    """Immutable object (namedtuple) which represents last observed synchronous replication state.

    :ivar version: modification version of a synchronization key in a Configuration Store.
    :ivar leader: reference to member that was leader.
    :ivar sync_standby: synchronous standby list (comma delimited) which are last synchronized to leader.
    """

    version: Optional[_Version]
    leader: Optional[str]
    sync_standby: Optional[str]

    @staticmethod
    def from_node(version: Optional[_Version], value: Union[str, Dict[str, Any], None]) -> 'SyncState':
        """Factory method to parse *value* as synchronisation state information.

        :param version: optional *version* number for the object.
        :param value: (optionally JSON serialised) synchronisation state information

        :returns: constructed :class:`SyncState` object.

        :Example:

            >>> SyncState.from_node(1, None).leader is None
            True

            >>> SyncState.from_node(1, '{}').leader is None
            True

            >>> SyncState.from_node(1, '{').leader is None
            True

            >>> SyncState.from_node(1, '[]').leader is None
            True

            >>> SyncState.from_node(1, '{"leader": "leader"}').leader == "leader"
            True

            >>> SyncState.from_node(1, {"leader": "leader"}).leader == "leader"
            True
        """
        try:
            if value and isinstance(value, str):
                value = json.loads(value)
            assert isinstance(value, dict)
            return SyncState(version, value.get('leader'), value.get('sync_standby'))
        except (AssertionError, TypeError, ValueError):
            # anything that isn't a JSON object maps to the empty state
            return SyncState.empty(version)

    @staticmethod
    def empty(version: Optional[_Version] = None) -> 'SyncState':
        """Construct an empty :class:`SyncState` instance.

        :param version: optional version number.

        :returns: empty synchronisation state object.
        """
        return SyncState(version, None, None)

    @property
    def is_empty(self) -> bool:
        """``True`` if ``/sync`` key is not valid (doesn't have a leader)."""
        return not self.leader

    @staticmethod
    def _str_to_list(value: str) -> List[str]:
        """Splits a string by comma and returns list of strings.

        :param value: a comma separated string.

        :returns: list of non-empty strings after splitting an input value by comma.
        """
        return list(filter(lambda a: a, [s.strip() for s in value.split(',')]))

    @property
    def members(self) -> List[str]:
        """:attr:`~SyncState.sync_standby` as list or an empty list if undefined or object considered ``empty``."""
        return self._str_to_list(self.sync_standby) if not self.is_empty and self.sync_standby else []

    def matches(self, name: Optional[str], check_leader: bool = False) -> bool:
        """Checks if node is presented in the /sync state.

        Since PostgreSQL does case-insensitive checks for ``synchronous_standby_names`` we do it also.

        :param name: name of the node.
        :param check_leader: by default the *name* is searched for only in members, a value of ``True`` will include the
            leader to list.
        :returns: ``True`` if the ``/sync`` key not :func:`is_empty` and the given *name* is among those presented in
            the sync state.

        :Example:

            >>> s = SyncState(1, 'foo', 'bar,zoo')

            >>> s.matches('foo')
            False

            >>> s.matches('fOo', True)
            True

            >>> s.matches('Bar')
            True

            >>> s.matches('zoO')
            True

            >>> s.matches('baz')
            False

            >>> s.matches(None)
            False

            >>> SyncState.empty(1).matches('foo')
            False
        """
        ret = False
        if name and not self.is_empty:
            # the leader is appended to the search list only on request
            search_str = (self.sync_standby or '') + (',' + (self.leader or '') if check_leader else '')
            ret = name.lower() in self._str_to_list(search_str.lower())
        return ret

    def leader_matches(self, name: Optional[str]) -> bool:
        """Compare the given *name* to stored leader value.

        :returns: ``True`` if *name* is matching the :attr:`~SyncState.leader` value.
        """
        return bool(name and not self.is_empty and name.lower() == (self.leader or '').lower())


_HistoryTuple = Union[Tuple[int, int, str], Tuple[int, int, str, str], Tuple[int, int, str, str, str]]


class TimelineHistory(NamedTuple):
    """Object representing timeline history file.

    .. note::
        The content held in *lines* deserialized from *value* are lines parsed from PostgreSQL timeline history files,
        consisting of the timeline number, the LSN where the timeline split and any other string held in the file.
        The files are parsed by :func:`~patroni.postgresql.misc.parse_history`.

    :ivar version: version number of the file.
    :ivar value: raw JSON serialised data consisting of parsed lines from history files.
    :ivar lines: ``List`` of ``Tuple`` parsed lines from history files.
    """

    version: _Version
    value: Any
    lines: List[_HistoryTuple]

    @staticmethod
    def from_node(version: _Version, value: str) -> 'TimelineHistory':
        """Parse the given JSON serialized string as a list of timeline history lines.

        :param version: version number
        :param value: JSON serialized string, consisting of parsed lines of PostgreSQL timeline history files,
            see :class:`TimelineHistory`.

        :returns: composed timeline history object using parsed lines.

        :Example:

            If the passed *value* argument is not parsed an empty list of lines is returned:

            >>> h = TimelineHistory.from_node(1, 2)

            >>> h.lines
            []
        """
        try:
            lines = json.loads(value)
            assert isinstance(lines, list)
        except (AssertionError, TypeError, ValueError):
            # raw *value* is still retained below even when it could not be parsed
            lines: List[_HistoryTuple] = []
        return TimelineHistory(version, value, lines)


class Status(NamedTuple):
    """Immutable object (namedtuple) which represents `/status` key.

    Consists of the following fields:

    :ivar last_lsn: :class:`int` object containing position of last known leader LSN.
    :ivar slots: state of permanent replication slots on the primary in the format: ``{"slot_name": int}``.
    """

    last_lsn: int
    slots: Optional[Dict[str, int]]

    @staticmethod
    def empty() -> 'Status':
        """Construct an empty :class:`Status` instance.

        :returns: empty :class:`Status` object.
        """
        return Status(0, None)

    @staticmethod
    def from_node(value: Union[str, Dict[str, Any], None]) -> 'Status':
        """Factory method to parse *value* as :class:`Status` object.

        :param value: JSON serialized string

        :returns: constructed :class:`Status` object.
""" try: if isinstance(value, str): value = json.loads(value) except Exception: return Status.empty() if isinstance(value, int): # legacy return Status(value, None) if not isinstance(value, dict): return Status.empty() try: last_lsn = int(value.get('optime', '')) except Exception: last_lsn = 0 slots: Union[str, Dict[str, int], None] = value.get('slots') if isinstance(slots, str): try: slots = json.loads(slots) except Exception: slots = None if not isinstance(slots, dict): slots = None return Status(last_lsn, slots) class Cluster(NamedTuple('Cluster', [('initialize', Optional[str]), ('config', Optional[ClusterConfig]), ('leader', Optional[Leader]), ('status', Status), ('members', List[Member]), ('failover', Optional[Failover]), ('sync', SyncState), ('history', Optional[TimelineHistory]), ('failsafe', Optional[Dict[str, str]]), ('workers', Dict[int, 'Cluster'])])): """Immutable object (namedtuple) which represents PostgreSQL or Citus cluster. .. note:: We are using an old-style attribute declaration here because otherwise it is not possible to override `__new__` method. Without it the *workers* by default gets always the same :class:`dict` object that could be mutated. Consists of the following fields: :ivar initialize: shows whether this cluster has initialization key stored in DC or not. :ivar config: global dynamic configuration, reference to `ClusterConfig` object. :ivar leader: :class:`Leader` object which represents current leader of the cluster. :ivar status: :class:`Status` object which represents the `/status` key. :ivar members: list of:class:` Member` objects, all PostgreSQL cluster members including leader :ivar failover: reference to :class:`Failover` object. :ivar sync: reference to :class:`SyncState` object, last observed synchronous replication state. :ivar history: reference to `TimelineHistory` object. :ivar failsafe: failsafe topology. Node is allowed to become the leader only if its name is found in this list. 
:ivar workers: dictionary of workers of the Citus cluster, optional. Each key is an :class:`int` representing the group, and the corresponding value is a :class:`Cluster` instance. """ def __new__(cls, *args: Any, **kwargs: Any): """Make workers argument optional and set it to an empty dict object.""" if len(args) < len(cls._fields) and 'workers' not in kwargs: kwargs['workers'] = {} return super(Cluster, cls).__new__(cls, *args, **kwargs) @property def last_lsn(self) -> int: """Last known leader LSN.""" return self.status.last_lsn @property def slots(self) -> Optional[Dict[str, int]]: """State of permanent replication slots on the primary in the format: ``{"slot_name": int}``.""" return self.status.slots @staticmethod def empty() -> 'Cluster': """Produce an empty :class:`Cluster` instance.""" return Cluster(None, None, None, Status.empty(), [], None, SyncState.empty(), None, None, {}) def is_empty(self): """Validate definition of all attributes of this :class:`Cluster` instance. :returns: ``True`` if all attributes of the current :class:`Cluster` are unpopulated. """ return all((self.initialize is None, self.config is None, self.leader is None, self.last_lsn == 0, self.members == [], self.failover is None, self.sync.version is None, self.history is None, self.slots is None, self.failsafe is None, self.workers == {})) def __len__(self) -> int: """Implement ``len`` function capability. .. note:: This magic method aids in the evaluation of "emptiness" of a ``Cluster`` instance. For example: >>> cluster = Cluster.empty() >>> len(cluster) 0 >>> assert bool(cluster) is False >>> cluster = Cluster(None, None, None, Status(0, None), [1, 2, 3], None, SyncState.empty(), None, None, {}) >>> len(cluster) 1 >>> assert bool(cluster) is True This makes it easier to write ``if cluster`` rather than the longer statement. 
""" return int(not self.is_empty()) @property def leader_name(self) -> Optional[str]: """The name of the leader if defined otherwise ``None``.""" return self.leader and self.leader.name def is_unlocked(self) -> bool: """Check if the cluster does not have the leader. :returns: ``True`` if a leader name is not defined. """ return not self.leader_name def has_member(self, member_name: str) -> bool: """Check if the given member name is present in the cluster. :param member_name: name to look up in the :attr:`~Cluster.members`. :returns: ``True`` if the member name is found. """ return any(m for m in self.members if m.name == member_name) def get_member(self, member_name: str, fallback_to_leader: bool = True) -> Union[Member, Leader, None]: """Get :class:`Member` object by name or the :class:`Leader`. :param member_name: name of the member to retrieve. :param fallback_to_leader: if ``True`` return the :class:`Leader` instead if the member cannot be found. :returns: the :class:`Member` if found or :class:`Leader` object. """ return next((m for m in self.members if m.name == member_name), self.leader if fallback_to_leader else None) def get_clone_member(self, exclude_name: str) -> Union[Member, Leader, None]: """Get member or leader object to use as clone source. :param exclude_name: name of a member name to exclude. :returns: a randomly selected candidate member from available running members that are configured to as viable sources for cloning (has tag ``clonefrom`` in configuration). If no member is appropriate the current leader is used. """ exclude = [exclude_name] + ([self.leader.name] if self.leader else []) candidates = [m for m in self.members if m.clonefrom and m.is_running and m.name not in exclude] return candidates[randint(0, len(candidates) - 1)] if candidates else self.leader @staticmethod def is_physical_slot(value: Union[Any, Dict[str, Any]]) -> bool: """Check whether provided configuration is for permanent physical replication slot. 
:param value: configuration of the permanent replication slot. :returns: ``True`` if *value* is a physical replication slot, otherwise ``False``. """ return not value or isinstance(value, dict) and value.get('type', 'physical') == 'physical' @staticmethod def is_logical_slot(value: Union[Any, Dict[str, Any]]) -> bool: """Check whether provided configuration is for permanent logical replication slot. :param value: configuration of the permanent replication slot. :returns: ``True`` if *value* is a logical replication slot, otherwise ``False``. """ return isinstance(value, dict) \ and value.get('type', 'logical') == 'logical' \ and bool(value.get('database') and value.get('plugin')) @property def __permanent_slots(self) -> Dict[str, Union[Dict[str, Any], Any]]: """Dictionary of permanent replication slots with their known LSN.""" ret: Dict[str, Union[Dict[str, Any], Any]] = deepcopy(self.config.permanent_slots if self.config else {}) members: Dict[str, int] = {slot_name_from_member_name(m.name): m.lsn or 0 for m in self.members} slots: Dict[str, int] = {k: parse_int(v) or 0 for k, v in (self.slots or {}).items()} for name, value in list(ret.items()): if not value: value = ret[name] = {} if isinstance(value, dict): # for permanent physical slots we want to get MAX LSN from the `Cluster.slots` and from the # member with the matching name. It is necessary because we may have the replication slot on # the primary that is streaming from the other standby node using the `replicatefrom` tag. 
                lsn = max(members.get(name, 0) if self.is_physical_slot(value) else 0, slots.get(name, 0))
                if lsn:
                    value['lsn'] = lsn
                else:
                    # Don't let anyone set 'lsn' in the global configuration :)
                    value.pop('lsn', None)
        return ret

    @property
    def __permanent_physical_slots(self) -> Dict[str, Any]:
        """Dictionary of permanent ``physical`` replication slots."""
        return {name: value for name, value in self.__permanent_slots.items() if self.is_physical_slot(value)}

    @property
    def __permanent_logical_slots(self) -> Dict[str, Any]:
        """Dictionary of permanent ``logical`` replication slots."""
        return {name: value for name, value in self.__permanent_slots.items() if self.is_logical_slot(value)}

    @property
    def use_slots(self) -> bool:
        """``True`` if cluster is configured to use replication slots."""
        return bool(self.config and (self.config.data.get('postgresql') or {}).get('use_slots', True))

    def get_replication_slots(self, my_name: str, role: str, nofailover: bool, major_version: int, *,
                              is_standby_cluster: bool = False, show_error: bool = False) -> Dict[str, Dict[str, Any]]:
        """Lookup configured slot names in the DCS, report issues found and merge with permanent slots.

        Will log an error if:

            * Any logical slots are disabled, due to version compatibility, and *show_error* is ``True``.

        :param my_name: name of this node.
        :param role: role of this node.
        :param nofailover: ``True`` if this node is tagged to not be a failover candidate.
        :param major_version: postgresql major version.
        :param is_standby_cluster: ``True`` if it is known that this is a standby cluster. We pass the value from
            the outside because we want to protect from the ``/config`` key removal.
        :param show_error: if ``True`` report error if any disabled logical slots or conflicting slot names are found.

        :returns: final dictionary of slot names, after merging with permanent slots and performing sanity checks.
        """
        slots: Dict[str, Dict[str, str]] = self._get_members_slots(my_name, role)
        permanent_slots: Dict[str, Any] = self._get_permanent_slots(is_standby_cluster=is_standby_cluster,
                                                                    role=role, nofailover=nofailover,
                                                                    major_version=major_version)

        # merging mutates *slots* in place and reports logical slots disabled by the PostgreSQL version
        disabled_permanent_logical_slots: List[str] = self._merge_permanent_slots(
            slots, permanent_slots, my_name, major_version)

        if disabled_permanent_logical_slots and show_error:
            logger.error("Permanent logical replication slots supported by Patroni only starting from PostgreSQL 11. "
                         "Following slots will not be created: %s.", disabled_permanent_logical_slots)

        return slots

    def _merge_permanent_slots(self, slots: Dict[str, Dict[str, str]], permanent_slots: Dict[str, Any],
                               my_name: str, major_version: int) -> List[str]:
        """Merge replication *slots* for members with *permanent_slots*.

        Perform validation of configured permanent slot name, skipping invalid names.

        Will update *slots* in-line based on ``type`` of slot, ``physical`` or ``logical``, and name of node.
        Type is assumed to be ``physical`` if there are no attributes stored as the slot value.

        :param slots: Slot names with existing attributes if known.
        :param permanent_slots: dictionary containing slot name key and slot information values.
        :param my_name: name of this node.
        :param major_version: postgresql major version.

        :returns: List of disabled permanent, logical slot names, if postgresql version < 11.
""" disabled_permanent_logical_slots: List[str] = [] for name, value in permanent_slots.items(): if not slot_name_re.match(name): logger.error("Invalid permanent replication slot name '%s'", name) logger.error("Slot name may only contain lower case letters, numbers, and the underscore chars") continue value = deepcopy(value) if value else {'type': 'physical'} if isinstance(value, dict): if 'type' not in value: value['type'] = 'logical' if value.get('database') and value.get('plugin') else 'physical' if value['type'] == 'physical': # Don't try to create permanent physical replication slot for yourself if name != slot_name_from_member_name(my_name): slots[name] = value continue if self.is_logical_slot(value): if major_version < SLOT_ADVANCE_AVAILABLE_VERSION: disabled_permanent_logical_slots.append(name) elif name in slots: logger.error("Permanent logical replication slot {'%s': %s} is conflicting with" " physical replication slot for cluster member", name, value) else: slots[name] = value continue logger.error("Bad value for slot '%s' in permanent_slots: %s", name, permanent_slots[name]) return disabled_permanent_logical_slots def _get_permanent_slots(self, *, is_standby_cluster: bool, role: str, nofailover: bool, major_version: int) -> Dict[str, Any]: """Get configured permanent replication slots. .. note:: Permanent replication slots are only considered if ``use_slots`` configuration is enabled. A node that is not supposed to become a leader (*nofailover*) will not have permanent replication slots. In a standby cluster we only support physical replication slots. The returned dictionary for a non-standby cluster always contains permanent logical replication slots in order to show a warning if they are not supported by PostgreSQL before v11. :param is_standby_cluster: ``True`` if it is known that this is a standby cluster. We pass the value from the outside because we want to protect from the ``/config`` key removal. 
:param role: role of this node -- ``primary``, ``standby_leader`` or ``replica``. :param nofailover: ``True`` if this node is tagged to not be a failover candidate. :param major_version: postgresql major version. :returns: dictionary of permanent slot names mapped to attributes. """ if not self.use_slots or nofailover: return {} if is_standby_cluster: return self.__permanent_physical_slots \ if major_version >= SLOT_ADVANCE_AVAILABLE_VERSION or role == 'standby_leader' else {} return self.__permanent_slots if major_version >= SLOT_ADVANCE_AVAILABLE_VERSION\ or role in ('master', 'primary') else self.__permanent_logical_slots def _get_members_slots(self, my_name: str, role: str) -> Dict[str, Dict[str, str]]: """Get physical replication slots configuration for members that sourcing from this node. If the ``replicatefrom`` tag is set on the member - we should not create the replication slot for it on the current primary, because that member would replicate from elsewhere. We still create the slot if the ``replicatefrom`` destination member is currently not a member of the cluster (fallback to the primary), or if ``replicatefrom`` destination member happens to be the current primary. Will log an error if: * Conflicting slot names between members are found :param my_name: name of this node. :param role: role of this node, if this is a ``primary`` or ``standby_leader`` return list of members replicating from this node. If not then return a list of members replicating as cascaded replicas from this node. :returns: dictionary of physical replication slots that should exist on a given node. 
""" if not self.use_slots: return {} # we always want to exclude the member with our name from the list members = filter(lambda m: m.name != my_name, self.members) if role in ('master', 'primary', 'standby_leader'): members = [m for m in members if m.replicatefrom is None or m.replicatefrom == my_name or not self.has_member(m.replicatefrom)] else: # only manage slots for replicas that replicate from this one, except for the leader among them members = [m for m in members if m.replicatefrom == my_name and m.name != self.leader_name] slots = {slot_name_from_member_name(m.name): {'type': 'physical'} for m in members} if len(slots) < len(members): # Find which names are conflicting for a nicer error message slot_conflicts: Dict[str, List[str]] = defaultdict(list) for member in members: slot_conflicts[slot_name_from_member_name(member.name)].append(member.name) logger.error("Following cluster members share a replication slot name: %s", "; ".join(f"{', '.join(v)} map to {k}" for k, v in slot_conflicts.items() if len(v) > 1)) return slots def has_permanent_slots(self, my_name: str, *, is_standby_cluster: bool = False, nofailover: bool = False, major_version: int = SLOT_ADVANCE_AVAILABLE_VERSION) -> bool: """Check if the given member node has permanent replication slots configured. :param my_name: name of the member node to check. :param is_standby_cluster: ``True`` if it is known that this is a standby cluster. We pass the value from the outside because we want to protect from the ``/config`` key removal. :param nofailover: ``True`` if this node is tagged to not be a failover candidate. :param major_version: postgresql major version. :returns: ``True`` if there are permanent replication slots configured, otherwise ``False``. 
""" role = 'replica' members_slots: Dict[str, Dict[str, str]] = self._get_members_slots(my_name, role) permanent_slots: Dict[str, Any] = self._get_permanent_slots(is_standby_cluster=is_standby_cluster, role=role, nofailover=nofailover, major_version=major_version) slots = deepcopy(members_slots) self._merge_permanent_slots(slots, permanent_slots, my_name, major_version) return len(slots) > len(members_slots) or any(self.is_physical_slot(v) for v in permanent_slots.values()) def filter_permanent_slots(self, slots: Dict[str, int], is_standby_cluster: bool, major_version: int) -> Dict[str, int]: """Filter out all non-permanent slots from provided *slots* dict. :param slots: slot names with LSN values :param is_standby_cluster: ``True`` if it is known that this is a standby cluster. We pass the value from the outside because we want to protect from the ``/config`` key removal. :param major_version: postgresql major version. :returns: a :class:`dict` object that contains only slots that are known to be permanent. """ if major_version < SLOT_ADVANCE_AVAILABLE_VERSION: return {} # for legacy PostgreSQL we don't support permanent slots on standby nodes permanent_slots: Dict[str, Any] = self._get_permanent_slots(is_standby_cluster=is_standby_cluster, role='replica', nofailover=False, major_version=major_version) members_slots = {slot_name_from_member_name(m.name) for m in self.members} return {name: value for name, value in slots.items() if name in permanent_slots and (self.is_physical_slot(permanent_slots[name]) or self.is_logical_slot(permanent_slots[name]) and name not in members_slots)} def _has_permanent_logical_slots(self, my_name: str, nofailover: bool) -> bool: """Check if the given member node has permanent ``logical`` replication slots configured. :param my_name: name of the member node to check. :param nofailover: ``True`` if this node is tagged to not be a failover candidate. 
        :returns: ``True`` if any detected replication slots are ``logical``, otherwise ``False``.
        """
        slots = self.get_replication_slots(my_name, 'replica', nofailover, SLOT_ADVANCE_AVAILABLE_VERSION).values()
        return any(v for v in slots if v.get("type") == "logical")

    def should_enforce_hot_standby_feedback(self, my_name: str, nofailover: bool) -> bool:
        """Determine whether ``hot_standby_feedback`` should be enabled for the given member.

        The ``hot_standby_feedback`` must be enabled if the current replica has ``logical`` slots,
        or it is working as a cascading replica for the other node that has ``logical`` slots.

        :param my_name: name of the member node to check.
        :param nofailover: ``True`` if this node is tagged to not be a failover candidate.

        :returns: ``True`` if this node or any member replicating from this node has permanent logical slots,
            otherwise ``False``.
        """
        if self._has_permanent_logical_slots(my_name, nofailover):
            return True

        if self.use_slots:
            # recurse into cascading replicas streaming from this node
            members = [m for m in self.members if m.replicatefrom == my_name and m.name != self.leader_name]
            return any(self.should_enforce_hot_standby_feedback(m.name, m.nofailover) for m in members)
        return False

    def get_my_slot_name_on_primary(self, my_name: str, replicatefrom: Optional[str]) -> str:
        """Canonical slot name for physical replication.

        .. note::
            P <-- I <-- L

            In case of cascading replication we have to check not our physical slot,
            but slot of the replica that connects us to the primary.

        :param my_name: the member node name that is replicating.
        :param replicatefrom: the Intermediate member name that is configured to replicate for cascading replication.

        :returns: The slot name that is in use for physical replication on this node.
        """
        m = self.get_member(replicatefrom, False) if replicatefrom else None
        # follow the replicatefrom chain up towards the primary
        return self.get_my_slot_name_on_primary(m.name, m.replicatefrom) \
            if isinstance(m, Member) else slot_name_from_member_name(my_name)

    @property
    def timeline(self) -> int:
        """Get the cluster history index from the :attr:`~Cluster.history`.

        :returns: If the recorded history is empty assume timeline is ``1``, if it is not defined or the stored history
            is not formatted as expected ``0`` is returned and an error will be logged.
            Otherwise, the last number stored incremented by 1 is returned.

        :Example:

            No history provided:
            >>> Cluster(0, 0, 0, Status.empty(), 0, 0, 0, 0, None, {}).timeline
            0

            Empty history assume timeline is ``1``:
            >>> Cluster(0, 0, 0, Status.empty(), 0, 0, 0, TimelineHistory.from_node(1, '[]'), None, {}).timeline
            1

            Invalid history format, a string of ``a``, returns ``0``:
            >>> Cluster(0, 0, 0, Status.empty(), 0, 0, 0, TimelineHistory.from_node(1, '[["a"]]'), None, {}).timeline
            0

            History as a list of strings:
            >>> history = TimelineHistory.from_node(1, '[["3", "2", "1"]]')
            >>> Cluster(0, 0, 0, Status.empty(), 0, 0, 0, history, None, {}).timeline
            4
        """
        if self.history:
            if self.history.lines:
                try:
                    return int(self.history.lines[-1][0]) + 1
                except Exception:
                    logger.error('Failed to parse cluster history from DCS: %s', self.history.lines)
            elif self.history.value == '[]':
                return 1
        return 0

    @property
    def min_version(self) -> Optional[Tuple[int, ...]]:
        """Lowest Patroni software version found in known members of the cluster."""
        return next(iter(sorted(m.patroni_version for m in self.members if m.patroni_version)), None)


class ReturnFalseException(Exception):
    """Exception to be caught by the :func:`catch_return_false_exception` decorator."""


def catch_return_false_exception(func: Callable[..., Any]) -> Any:
    """Decorator function for catching functions raising :exc:`ReturnFalseException`.

    :param func: function to be wrapped.

    :returns: wrapped function.
""" def wrapper(*args: Any, **kwargs: Any): try: return func(*args, **kwargs) except ReturnFalseException: return False return wrapper class AbstractDCS(abc.ABC): """Abstract representation of DCS modules. Implementations of a concrete DCS class, using appropriate backend client interfaces, must include the following methods and properties. Functional methods that are critical in their timing, required to complete within ``retry_timeout`` period in order to prevent the DCS considered inaccessible, each perform construction of complex data objects: * :meth:`~AbstractDCS._cluster_loader`: method which processes the structure of data stored in the DCS used to build the :class:`Cluster` object with all relevant associated data. * :meth:`~AbstractDCS._citus_cluster_loader`: Similar to above but specifically representing Citus group and workers information. * :meth:`~AbstractDCS._load_cluster`: main method for calling specific ``loader`` method to build the :class:`Cluster` object representing the state and topology of the cluster. Functional methods that are critical in their timing and must be written with ACID transaction properties in mind: * :meth:`~AbstractDCS.attempt_to_acquire_leader`: method used in the leader race to attempt to acquire the leader lock by creating the leader key in the DCS, if it does not exist. * :meth:`~AbstractDCS._update_leader`: method to update ``leader`` key in DCS. Relies on Compare-And-Set to ensure the Primary lock key is updated. If this fails to update within the ``retry_timeout`` window the Primary will be demoted. Functional method that relies on Compare-And-Create to ensure only one member creates the relevant key: * :meth:`~AbstractDCS.initialize`: method used in the race for cluster initialization which creates the ``initialize`` key in the DCS. DCS backend getter and setter methods and properties: * :meth:`~AbstractDCS.take_leader`: method to create a new leader key in the DCS. 
    * :meth:`~AbstractDCS.set_ttl`: method for setting TTL value in DCS.

    * :meth:`~AbstractDCS.ttl`: property which returns the current TTL.

    * :meth:`~AbstractDCS.set_retry_timeout`: method for setting ``retry_timeout`` in DCS backend.

    * :meth:`~AbstractDCS._write_leader_optime`: compatibility method to write WAL LSN to DCS.

    * :meth:`~AbstractDCS._write_status`: method to write WAL LSN for slots to the DCS.

    * :meth:`~AbstractDCS._write_failsafe`: method to write cluster topology to the DCS, used by failsafe mechanism.

    * :meth:`~AbstractDCS.touch_member`: method to update individual member key in the DCS.

    * :meth:`~AbstractDCS.set_history_value`: method to set the ``history`` key in the DCS.

    DCS setter methods using Compare-And-Set which although important are less critical if they fail, attempts
    can be retried or may result in warning log messages:

    * :meth:`~AbstractDCS.set_failover_value`: method to create and/or update the ``failover`` key in the DCS.

    * :meth:`~AbstractDCS.set_config_value`: method to create and/or update the ``config`` key in the DCS.

    * :meth:`~AbstractDCS.set_sync_state_value`: method to set the synchronous state ``sync`` key in the DCS.

    DCS data and key removal methods:

    * :meth:`~AbstractDCS.delete_sync_state`: likewise, a method to remove synchronous state ``sync`` key
      from the DCS.

    * :meth:`~AbstractDCS.delete_cluster`: method which will remove cluster information from the DCS.
      Used only from `patronictl`.

    * :meth:`~AbstractDCS._delete_leader`: method relies on CAS, used by a member that is the current leader,
      to remove the ``leader`` key in the DCS.

    * :meth:`~AbstractDCS.cancel_initialization`: method to remove the ``initialize`` key for the cluster
      from the DCS.

    If either of the `sync_state` set or delete methods fail, although not critical, this may result in
    ``Synchronous replication key updated by someone else`` messages being logged.
    Care should be taken to consult each abstract method for any additional information and requirements such
    as expected exceptions that should be raised in certain conditions and the object types for arguments and
    return from methods and properties.
    """

    _INITIALIZE = 'initialize'
    _CONFIG = 'config'
    _LEADER = 'leader'
    _FAILOVER = 'failover'
    _HISTORY = 'history'
    _MEMBERS = 'members/'
    _OPTIME = 'optime'
    _STATUS = 'status'  # JSON, contains "leader_lsn" and confirmed_flush_lsn of logical "slots" on the leader
    _LEADER_OPTIME = _OPTIME + '/' + _LEADER  # legacy
    _SYNC = 'sync'
    _FAILSAFE = 'failsafe'

    def __init__(self, config: Dict[str, Any]) -> None:
        """Prepare DCS paths, Citus group ID, initial values for state information and processing dependencies.

        :param config: :class:`dict`, reference to config section of selected DCS.
                       i.e.: ``zookeeper`` for zookeeper, ``etcd`` for etcd, etc...
        """
        self._name = config['name']
        # Collapse duplicate slashes so a user-supplied namespace cannot produce '//' in key paths.
        self._base_path = re.sub('/+', '/', '/'.join(['', config.get('namespace', 'service'), config['scope']]))
        self._citus_group = str(config['group']) if isinstance(config.get('group'), int) else None
        self._set_loop_wait(config.get('loop_wait', 10))

        self._ctl = bool(config.get('patronictl', False))
        self._cluster: Optional[Cluster] = None
        self._cluster_valid_till: float = 0
        self._cluster_thread_lock = Lock()
        self._last_lsn: int = 0
        self._last_seen: int = 0
        self._last_status: Dict[str, Any] = {}
        self._last_failsafe: Optional[Dict[str, str]] = {}
        self.event = Event()

    def client_path(self, path: str) -> str:
        """Construct the absolute key name from appropriate parts for the DCS type.

        :param path: The key name within the current Patroni cluster.

        :returns: absolute key name for the current Patroni cluster.
""" components = [self._base_path] if self._citus_group: components.append(self._citus_group) components.append(path.lstrip('/')) return '/'.join(components) @property def initialize_path(self) -> str: """Get the client path for ``initialize``.""" return self.client_path(self._INITIALIZE) @property def config_path(self) -> str: """Get the client path for ``config``.""" return self.client_path(self._CONFIG) @property def members_path(self) -> str: """Get the client path for ``members``.""" return self.client_path(self._MEMBERS) @property def member_path(self) -> str: """Get the client path for ``member`` representing this node.""" return self.client_path(self._MEMBERS + self._name) @property def leader_path(self) -> str: """Get the client path for ``leader``.""" return self.client_path(self._LEADER) @property def failover_path(self) -> str: """Get the client path for ``failover``.""" return self.client_path(self._FAILOVER) @property def history_path(self) -> str: """Get the client path for ``history``.""" return self.client_path(self._HISTORY) @property def status_path(self) -> str: """Get the client path for ``status``.""" return self.client_path(self._STATUS) @property def leader_optime_path(self) -> str: """Get the client path for ``optime/leader`` (legacy key, superseded by ``status``).""" return self.client_path(self._LEADER_OPTIME) @property def sync_path(self) -> str: """Get the client path for ``sync``.""" return self.client_path(self._SYNC) @property def failsafe_path(self) -> str: """Get the client path for ``failsafe``.""" return self.client_path(self._FAILSAFE) @abc.abstractmethod def set_ttl(self, ttl: int) -> Optional[bool]: """Set the new *ttl* value for DCS keys.""" @property @abc.abstractmethod def ttl(self) -> int: """Get current ``ttl`` value.""" @abc.abstractmethod def set_retry_timeout(self, retry_timeout: int) -> None: """Set the new value for *retry_timeout*.""" def _set_loop_wait(self, loop_wait: int) -> None: """Set new *loop_wait* value. 
:param loop_wait: value to set. """ self._loop_wait = loop_wait def reload_config(self, config: Union['Config', Dict[str, Any]]) -> None: """Load and set relevant values from configuration. Sets ``loop_wait``, ``ttl`` and ``retry_timeout`` properties. :param config: Loaded configuration information object or dictionary of key value pairs. """ self._set_loop_wait(config['loop_wait']) self.set_ttl(config['ttl']) self.set_retry_timeout(config['retry_timeout']) @property def loop_wait(self) -> int: """The recorded value for cluster HA loop wait time in seconds.""" return self._loop_wait @property def last_seen(self) -> int: """The time recorded when the DCS was last reachable.""" return self._last_seen @abc.abstractmethod def _cluster_loader(self, path: Any) -> Cluster: """Load and build the :class:`Cluster` object from DCS, which represents a single Patroni or Citus cluster. :param path: the path in DCS where to load Cluster(s) from. :returns: :class:`Cluster` instance. """ @abc.abstractmethod def _citus_cluster_loader(self, path: Any) -> Dict[int, Cluster]: """Load and build all Patroni clusters from a single Citus cluster. :param path: the path in DCS where to load Cluster(s) from. :returns: all Citus groups as :class:`dict`, with group IDs as keys and :class:`Cluster` objects as values or a :class:`Cluster` object representing the coordinator with filled `Cluster.workers` attribute. """ @abc.abstractmethod def _load_cluster( self, path: str, loader: Callable[[Any], Union[Cluster, Dict[int, Cluster]]] ) -> Union[Cluster, Dict[int, Cluster]]: """Main abstract method that implements the loading of :class:`Cluster` instance. .. note:: Internally this method should call the *loader* method that will build :class:`Cluster` object which represents current state and topology of the cluster in DCS. This method supposed to be called only by the :meth:`~AbstractDCS.get_cluster` method. :param path: the path in DCS where to load Cluster(s) from. 
:param loader: one of :meth:`~AbstractDCS._cluster_loader` or :meth:`~AbstractDCS._citus_cluster_loader`. :raise: :exc:`~DCSError` in case of communication problems with DCS. If the current node was running as a primary and exception raised, instance would be demoted. """ def __get_patroni_cluster(self, path: Optional[str] = None) -> Cluster: """Low level method to load a :class:`Cluster` object from DCS. :param path: optional client path in DCS backend to load from. :returns: a loaded :class:`Cluster` instance. """ if path is None: path = self.client_path('') cluster = self._load_cluster(path, self._cluster_loader) if TYPE_CHECKING: # pragma: no cover assert isinstance(cluster, Cluster) return cluster def is_citus_coordinator(self) -> bool: """:class:`Cluster` instance has a Citus Coordinator group ID. :returns: ``True`` if the given node is running as Citus Coordinator (``group=0``). """ return self._citus_group == str(CITUS_COORDINATOR_GROUP_ID) def get_citus_coordinator(self) -> Optional[Cluster]: """Load the Patroni cluster for the Citus Coordinator. .. note:: This method is only executed on the worker nodes (``group!=0``) to find the coordinator. :returns: Select :class:`Cluster` instance associated with the Citus Coordinator group ID. """ try: return self.__get_patroni_cluster(f'{self._base_path}/{CITUS_COORDINATOR_GROUP_ID}/') except Exception as e: logger.error('Failed to load Citus coordinator cluster from %s: %r', self.__class__.__name__, e) return None def _get_citus_cluster(self) -> Cluster: """Load Citus cluster from DCS. :returns: A Citus :class:`Cluster` instance for the coordinator with workers clusters in the `Cluster.workers` dict. 
""" groups = self._load_cluster(self._base_path + '/', self._citus_cluster_loader) if TYPE_CHECKING: # pragma: no cover assert isinstance(groups, dict) cluster = groups.pop(CITUS_COORDINATOR_GROUP_ID, Cluster.empty()) cluster.workers.update(groups) return cluster def get_cluster(self) -> Cluster: """Retrieve a fresh view of DCS. .. note:: Stores copy of time, status and failsafe values for comparison in DCS update decisions. Caching is required to avoid overhead placed upon the REST API. Returns either a Citus or Patroni implementation of :class:`Cluster` depending on availability. :returns: """ try: cluster = self._get_citus_cluster() if self.is_citus_coordinator() else self.__get_patroni_cluster() except Exception: self.reset_cluster() raise self._last_seen = int(time.time()) self._last_status = {self._OPTIME: cluster.last_lsn} if cluster.slots: self._last_status['slots'] = cluster.slots self._last_failsafe = cluster.failsafe with self._cluster_thread_lock: self._cluster = cluster self._cluster_valid_till = time.time() + self.ttl return cluster @property def cluster(self) -> Optional[Cluster]: """Cached DCS cluster information that has not yet expired.""" with self._cluster_thread_lock: return self._cluster if self._cluster_valid_till > time.time() else None def reset_cluster(self) -> None: """Clear cached state of DCS.""" with self._cluster_thread_lock: self._cluster = None self._cluster_valid_till = 0 @abc.abstractmethod def _write_leader_optime(self, last_lsn: str) -> bool: """Write current WAL LSN into ``/optime/leader`` key in DCS. :param last_lsn: absolute WAL LSN in bytes. :returns: ``True`` if successfully committed to DCS. """ def write_leader_optime(self, last_lsn: int) -> None: """Write value for WAL LSN to ``optime/leader`` key in DCS. .. note:: This method abstracts away the required data structure of :meth:`~Cluster.write_status`, so it is not needed in the caller. 
However, the ``optime/leader`` is only written in :meth:`~Cluster.write_status` when the cluster has members with a Patroni version that is old enough to require it (i.e. the old Patroni version doesn't understand the new format). :param last_lsn: absolute WAL LSN in bytes. """ self.write_status({self._OPTIME: last_lsn}) @abc.abstractmethod def _write_status(self, value: str) -> bool: """Write current WAL LSN and ``confirmed_flush_lsn`` of permanent slots into the ``/status`` key in DCS. :param value: status serialized in JSON format. :returns: ``True`` if successfully committed to DCS. """ def write_status(self, value: Dict[str, Any]) -> None: """Write cluster status to DCS if changed. .. note:: The DCS key ``/status`` was introduced in Patroni version 2.1.0. Previous to this the position of last known leader LSN was stored in ``optime/leader``. This method has detection for backwards compatibility of members with a version older than this. :param value: JSON serializable dictionary with current WAL LSN and ``confirmed_flush_lsn`` of permanent slots. """ if not deep_compare(self._last_status, value) and self._write_status(json.dumps(value, separators=(',', ':'))): self._last_status = value cluster = self.cluster min_version = cluster and cluster.min_version if min_version and min_version < (2, 1, 0) and self._last_lsn != value[self._OPTIME]: self._last_lsn = value[self._OPTIME] self._write_leader_optime(str(value[self._OPTIME])) @abc.abstractmethod def _write_failsafe(self, value: str) -> bool: """Write current cluster topology to DCS that will be used by failsafe mechanism (if enabled). :param value: failsafe topology serialized in JSON format. :returns: ``True`` if successfully committed to DCS. """ def write_failsafe(self, value: Dict[str, str]) -> None: """Write the ``/failsafe`` key in DCS. :param value: dictionary value to set, consisting of the ``name`` and ``api_url`` of members. 
""" if not (isinstance(self._last_failsafe, dict) and deep_compare(self._last_failsafe, value)) \ and self._write_failsafe(json.dumps(value, separators=(',', ':'))): self._last_failsafe = value @property def failsafe(self) -> Optional[Dict[str, str]]: """Stored value of :attr:`~AbstractDCS._last_failsafe`.""" return self._last_failsafe @abc.abstractmethod def _update_leader(self, leader: Leader) -> bool: """Update ``leader`` key (or session) ttl. .. note:: You have to use CAS (Compare And Swap) operation in order to update leader key, for example for etcd ``prevValue`` parameter must be used. If update fails due to DCS not being accessible or because it is not able to process requests (hopefully temporary), the :exc:`DCSError` exception should be raised. :param leader: a reference to a current :class:`leader` object. :returns: ``True`` if ``leader`` key (or session) has been updated successfully. """ def update_leader(self, leader: Leader, last_lsn: Optional[int], slots: Optional[Dict[str, int]] = None, failsafe: Optional[Dict[str, str]] = None) -> bool: """Update ``leader`` key (or session) ttl and optime/leader. :param leader: :class:`Leader` object with information about the leader. :param last_lsn: absolute WAL LSN in bytes. :param slots: dictionary with permanent slots ``confirmed_flush_lsn``. :param failsafe: if defined dictionary passed to :meth:`~AbstractDCS.write_failsafe`. :returns: ``True`` if ``leader`` key (or session) has been updated successfully. """ ret = self._update_leader(leader) if ret and last_lsn: status: Dict[str, Any] = {self._OPTIME: last_lsn} if slots: status['slots'] = slots self.write_status(status) if ret and failsafe is not None: self.write_failsafe(failsafe) return ret @abc.abstractmethod def attempt_to_acquire_leader(self) -> bool: """Attempt to acquire leader lock. .. note:: This method should create ``/leader`` key with the value :attr:`~AbstractDCS._name`. The key must be created atomically. 
In case the key already exists it should not be overwritten and ``False`` must be returned. If key creation fails due to DCS not being accessible or because it is not able to process requests (hopefully temporary), the :exc:`DCSError` exception should be raised. :returns: ``True`` if key has been created successfully. """ @abc.abstractmethod def set_failover_value(self, value: str, version: Optional[Any] = None) -> bool: """Create or update ``/failover`` key. :param value: value to set. :param version: for conditional update of the key/object. :returns: ``True`` if successfully committed to DCS. """ def manual_failover(self, leader: Optional[str], candidate: Optional[str], scheduled_at: Optional[datetime.datetime] = None, version: Optional[Any] = None) -> bool: """Prepare dictionary with given values and set ``/failover`` key in DCS. :param leader: value to set for ``leader``. :param candidate: value to set for ``member``. :param scheduled_at: value converted to ISO date format for ``scheduled_at``. :param version: for conditional update of the key/object. :returns: ``True`` if successfully committed to DCS. """ failover_value = {} if leader: failover_value['leader'] = leader if candidate: failover_value['member'] = candidate if scheduled_at: failover_value['scheduled_at'] = scheduled_at.isoformat() return self.set_failover_value(json.dumps(failover_value, separators=(',', ':')), version) @abc.abstractmethod def set_config_value(self, value: str, version: Optional[Any] = None) -> bool: """Create or update ``/config`` key in DCS. :param value: new value to set in the ``config`` key. :param version: for conditional update of the key/object. :returns: ``True`` if successfully committed to DCS. """ @abc.abstractmethod def touch_member(self, data: Dict[str, Any]) -> bool: """Update member key in DCS. .. note:: This method should create or update key with the name with ``/members/`` + :attr:`~AbstractDCS._name` and the value of *data* in a given DCS. 
:param data: information about an instance (including connection strings). :returns: ``True`` if successfully committed to DCS. """ @abc.abstractmethod def take_leader(self) -> bool: """Establish a new leader in DCS. .. note:: This method should create leader key with value of :attr:`~AbstractDCS._name` and ``ttl`` of :attr:`~AbstractDCS.ttl`. Since it could be called only on initial cluster bootstrap it could create this key regardless, overwriting the key if necessary. :returns: ``True`` if successfully committed to DCS. """ @abc.abstractmethod def initialize(self, create_new: bool = True, sysid: str = "") -> bool: """Race for cluster initialization. This method should atomically create ``initialize`` key and return ``True``, otherwise it should return ``False``. :param create_new: ``False`` if the key should already exist (in the case we are setting the system_id). :param sysid: PostgreSQL cluster system identifier, if specified, is written to the key. :returns: ``True`` if key has been created successfully. """ @abc.abstractmethod def _delete_leader(self, leader: Leader) -> bool: """Remove leader key from DCS. This method should remove leader key if current instance is the leader. :param leader: :class:`Leader` object with information about the leader. :returns: ``True`` if successfully committed to DCS. """ def delete_leader(self, leader: Optional[Leader], last_lsn: Optional[int] = None) -> bool: """Update ``optime/leader`` and voluntarily remove leader key from DCS. This method should remove leader key if current instance is the leader. :param leader: :class:`Leader` object with information about the leader. :param last_lsn: latest checkpoint location in bytes. :returns: boolean result of called abstract :meth:`~AbstractDCS._delete_leader`. """ if last_lsn: self.write_status({self._OPTIME: last_lsn}) return bool(leader) and self._delete_leader(leader) @abc.abstractmethod def cancel_initialization(self) -> bool: """Removes the ``initialize`` key for a cluster. 
:returns: ``True`` if successfully committed to DCS. """ @abc.abstractmethod def delete_cluster(self) -> bool: """Delete cluster from DCS. :returns: ``True`` if successfully committed to DCS. """ @staticmethod def sync_state(leader: Optional[str], sync_standby: Optional[Collection[str]]) -> Dict[str, Any]: """Build ``sync_state`` dictionary. :param leader: name of the leader node that manages ``/sync`` key. :param sync_standby: collection of currently known synchronous standby node names. :returns: dictionary that later could be serialized to JSON or saved directly to DCS. """ return {'leader': leader, 'sync_standby': ','.join(sorted(sync_standby)) if sync_standby else None} def write_sync_state(self, leader: Optional[str], sync_standby: Optional[Collection[str]], version: Optional[Any] = None) -> Optional[SyncState]: """Write the new synchronous state to DCS. Calls :meth:`~AbstractDCS.sync_state` to build a dictionary and then calls DCS specific :meth:`~AbstractDCS.set_sync_state_value`. :param leader: name of the leader node that manages ``/sync`` key. :param sync_standby: collection of currently known synchronous standby node names. :param version: for conditional update of the key/object. :returns: the new :class:`SyncState` object or ``None``. """ sync_value = self.sync_state(leader, sync_standby) ret = self.set_sync_state_value(json.dumps(sync_value, separators=(',', ':')), version) if not isinstance(ret, bool): return SyncState.from_node(ret, sync_value) return None @abc.abstractmethod def set_history_value(self, value: str) -> bool: """Set value for ``history`` in DCS. :param value: new value of ``history`` key/object. :returns: ``True`` if successfully committed to DCS. """ @abc.abstractmethod def set_sync_state_value(self, value: str, version: Optional[Any] = None) -> Union[Any, bool]: """Set synchronous state in DCS. :param value: the new value of ``/sync`` key. :param version: for conditional update of the key/object. 
:returns: *version* of the new object or ``False`` in case of error. """ @abc.abstractmethod def delete_sync_state(self, version: Optional[Any] = None) -> bool: """Delete the synchronous state from DCS. :param version: for conditional deletion of the key/object. :returns: ``True`` if delete successful. """ def watch(self, leader_version: Optional[Any], timeout: float) -> bool: """Sleep if the current node is a leader, otherwise, watch for changes of leader key with a given *timeout*. :param leader_version: version of a leader key. :param timeout: timeout in seconds. :returns: if ``True`` this will reschedule the next run of the HA cycle. """ _ = leader_version self.event.wait(timeout) return self.event.is_set() patroni-3.2.2/patroni/dcs/consul.py000066400000000000000000000713071455170150700173110ustar00rootroot00000000000000from __future__ import absolute_import import json import logging import os import re import socket import ssl import time import urllib3 from collections import defaultdict from consul import ConsulException, NotFound, base from http.client import HTTPException from urllib3.exceptions import HTTPError from urllib.parse import urlencode, urlparse, quote from typing import Any, Callable, Dict, List, Mapping, NamedTuple, Optional, Union, Tuple, TYPE_CHECKING from . 
import AbstractDCS, Cluster, ClusterConfig, Failover, Leader, Member, Status, SyncState, \ TimelineHistory, ReturnFalseException, catch_return_false_exception, citus_group_re from ..exceptions import DCSError from ..utils import deep_compare, parse_bool, Retry, RetryFailedError, split_host_port, uri, USER_AGENT if TYPE_CHECKING: # pragma: no cover from ..config import Config logger = logging.getLogger(__name__) class ConsulError(DCSError): pass class ConsulInternalError(ConsulException): """An internal Consul server error occurred""" class InvalidSessionTTL(ConsulException): """Session TTL is too small or too big""" class InvalidSession(ConsulException): """invalid session""" class Response(NamedTuple): code: int headers: Union[Mapping[str, str], Mapping[bytes, bytes], None] body: str content: bytes class HTTPClient(object): def __init__(self, host: str = '127.0.0.1', port: int = 8500, token: Optional[str] = None, scheme: str = 'http', verify: bool = True, cert: Optional[str] = None, ca_cert: Optional[str] = None) -> None: self.token = token self._read_timeout = 10 self.base_uri = uri(scheme, (host, port)) kwargs = {} if cert: if isinstance(cert, tuple): # Key and cert are separate kwargs['cert_file'] = cert[0] kwargs['key_file'] = cert[1] else: # combined certificate kwargs['cert_file'] = cert if ca_cert: kwargs['ca_certs'] = ca_cert kwargs['cert_reqs'] = ssl.CERT_REQUIRED if verify or ca_cert else ssl.CERT_NONE self.http = urllib3.PoolManager(num_pools=10, maxsize=10, headers={}, **kwargs) self._ttl = 30 def set_read_timeout(self, timeout: float) -> None: self._read_timeout = timeout / 3.0 @property def ttl(self) -> int: return self._ttl def set_ttl(self, ttl: int) -> bool: ret = self._ttl != ttl self._ttl = ttl return ret @staticmethod def response(response: urllib3.response.HTTPResponse) -> Response: content = response.data body = content.decode('utf-8') if response.status == 500: msg = '{0} {1}'.format(response.status, body) if body.startswith('Invalid Session 
TTL'): raise InvalidSessionTTL(msg) elif body.startswith('invalid session'): raise InvalidSession(msg) else: raise ConsulInternalError(msg) return Response(response.status, response.headers, body, content) def uri(self, path: str, params: Union[None, Dict[str, Any], List[Tuple[str, Any]], Tuple[Tuple[str, Any], ...]] = None) -> str: return '{0}{1}{2}'.format(self.base_uri, path, params and '?' + urlencode(params) or '') def __getattr__(self, method: str) -> Callable[[Callable[[Response], Union[bool, Any, Tuple[str, Any]]], str, Union[None, Dict[str, Any], List[Tuple[str, Any]]], str, Optional[Dict[str, str]]], Union[bool, Any, Tuple[str, Any]]]: if method not in ('get', 'post', 'put', 'delete'): raise AttributeError("HTTPClient instance has no attribute '{0}'".format(method)) def wrapper(callback: Callable[[Response], Union[bool, Any, Tuple[str, Any]]], path: str, params: Union[None, Dict[str, Any], List[Tuple[str, Any]]] = None, data: str = '', headers: Optional[Dict[str, str]] = None) -> Union[bool, Any, Tuple[str, Any]]: # python-consul doesn't allow to specify ttl smaller then 10 seconds # because session_ttl_min defaults to 10s, so we have to do this ugly dirty hack... if method == 'put' and path == '/v1/session/create': ttl = '"ttl": "{0}s"'.format(self._ttl) if not data or data == '{}': data = '{' + ttl + '}' else: data = data[:-1] + ', ' + ttl + '}' if isinstance(params, list): # starting from v1.1.0 python-consul switched from `dict` to `list` for params params = {k: v for k, v in params} kwargs: Dict[str, Any] = {'retries': 0, 'preload_content': False, 'body': data} if method == 'get' and isinstance(params, dict) and 'index' in params: timeout = float(params['wait'][:-1]) if 'wait' in params else 300 # According to the documentation a small random amount of additional wait time is added to the # supplied maximum wait time to spread out the wake up time of any concurrent requests. This adds # up to wait / 16 additional time to the maximum duration. 
Since our goal is actually getting a # response rather read timeout we will add to the timeout a slightly bigger value. kwargs['timeout'] = timeout + max(timeout / 15.0, 1) else: kwargs['timeout'] = self._read_timeout kwargs['headers'] = (headers or {}).copy() kwargs['headers'].update(urllib3.make_headers(user_agent=USER_AGENT)) token = params.pop('token', self.token) if isinstance(params, dict) else self.token if token: kwargs['headers']['X-Consul-Token'] = token return callback(self.response(self.http.request(method.upper(), self.uri(path, params), **kwargs))) return wrapper class ConsulClient(base.Consul): def __init__(self, *args: Any, **kwargs: Any) -> None: """ Consul client with Patroni customisations. .. note:: Parameters, *token*, *cert* and *ca_cert* are not passed to the parent class :class:`consul.base.Consul`. Original class documentation, *token* is an optional ``ACL token``. If supplied it will be used by default for all requests made with this client session. It's still possible to override this token by passing a token explicitly for a request. *consistency* sets the consistency mode to use by default for all reads that support the consistency option. It's still possible to override this by passing explicitly for a given request. *consistency* can be either 'default', 'consistent' or 'stale'. *dc* is the datacenter that this agent will communicate with. By default, the datacenter of the host is used. 
*verify* is whether to verify the SSL certificate for HTTPS requests *cert* client side certificates for HTTPS requests :param args: positional arguments to pass to :class:`consul.base.Consul` :param kwargs: keyword arguments, with *cert*, *ca_cert* and *token* removed, passed to :class:`consul.base.Consul` """ self._cert = kwargs.pop('cert', None) self._ca_cert = kwargs.pop('ca_cert', None) self.token = kwargs.get('token') super(ConsulClient, self).__init__(*args, **kwargs) def http_connect(self, *args: Any, **kwargs: Any) -> HTTPClient: kwargs.update(dict(zip(['host', 'port', 'scheme', 'verify'], args))) if self._cert: kwargs['cert'] = self._cert if self._ca_cert: kwargs['ca_cert'] = self._ca_cert if self.token: kwargs['token'] = self.token return HTTPClient(**kwargs) def connect(self, *args: Any, **kwargs: Any) -> HTTPClient: return self.http_connect(*args, **kwargs) def reload_config(self, config: Dict[str, Any]) -> None: self.http.token = self.token = config.get('token') self.consistency = config.get('consistency', 'default') self.dc = config.get('dc') def catch_consul_errors(func: Callable[..., Any]) -> Callable[..., Any]: def wrapper(*args: Any, **kwargs: Any) -> Any: try: return func(*args, **kwargs) except (RetryFailedError, ConsulException, HTTPException, HTTPError, socket.error, socket.timeout): return False return wrapper def force_if_last_failed(func: Callable[..., Any]) -> Callable[..., Any]: def wrapper(*args: Any, **kwargs: Any) -> Any: if getattr(wrapper, 'last_result', None) is False: kwargs['force'] = True last_result = func(*args, **kwargs) setattr(wrapper, 'last_result', last_result) return last_result setattr(wrapper, 'last_result', None) return wrapper def service_name_from_scope_name(scope_name: str) -> str: """Translate scope name to service name which can be used in dns. 230 = 253 - len('replica.') - len('.service.consul') """ def replace_char(match: Any) -> str: c = match.group(0) return '-' if c in '. 
_' else "u{:04d}".format(ord(c)) service_name = re.sub(r'[^a-z0-9\-]', replace_char, scope_name.lower()) return service_name[0:230] class Consul(AbstractDCS): def __init__(self, config: Dict[str, Any]) -> None: super(Consul, self).__init__(config) self._base_path = self._base_path[1:] self._scope = config['scope'] self._session = None self.__do_not_watch = False self._retry = Retry(deadline=config['retry_timeout'], max_delay=1, max_tries=-1, retry_exceptions=(ConsulInternalError, HTTPException, HTTPError, socket.error, socket.timeout)) if 'url' in config: url: str = config['url'] r = urlparse(url) config.update({'scheme': r.scheme, 'host': r.hostname, 'port': r.port or 8500}) elif 'host' in config: host, port = split_host_port(config.get('host', '127.0.0.1:8500'), 8500) config['host'] = host if 'port' not in config: config['port'] = int(port) if config.get('cacert'): config['ca_cert'] = config.pop('cacert') if config.get('key') and config.get('cert'): config['cert'] = (config['cert'], config['key']) config_keys = ('host', 'port', 'token', 'scheme', 'cert', 'ca_cert', 'dc', 'consistency') kwargs: Dict[str, Any] = {p: config.get(p) for p in config_keys if config.get(p)} verify = config.get('verify') if not isinstance(verify, bool): verify = parse_bool(verify) if isinstance(verify, bool): kwargs['verify'] = verify self._client = ConsulClient(**kwargs) self.set_retry_timeout(config['retry_timeout']) self.set_ttl(config.get('ttl') or 30) self._last_session_refresh = 0 self.__session_checks = config.get('checks', []) self._register_service = config.get('register_service', False) self._previous_loop_register_service = self._register_service self._service_tags = sorted(config.get('service_tags', [])) self._previous_loop_service_tags = self._service_tags if self._register_service: self._set_service_name() self._service_check_interval = config.get('service_check_interval', '5s') self._service_check_tls_server_name = config.get('service_check_tls_server_name', None) if not 
self._ctl: self.create_session() self._previous_loop_token = self._client.token def retry(self, method: Callable[..., Any], *args: Any, **kwargs: Any) -> Any: return self._retry.copy()(method, *args, **kwargs) def create_session(self) -> None: while not self._session: try: self.refresh_session() except ConsulError: logger.info('waiting on consul') time.sleep(5) def reload_config(self, config: Union['Config', Dict[str, Any]]) -> None: super(Consul, self).reload_config(config) consul_config = config.get('consul', {}) self._client.reload_config(consul_config) self._previous_loop_service_tags = self._service_tags self._service_tags: List[str] = consul_config.get('service_tags', []) self._service_tags.sort() should_register_service = consul_config.get('register_service', False) if should_register_service and not self._register_service: self._set_service_name() self._previous_loop_register_service = self._register_service self._register_service = should_register_service def set_ttl(self, ttl: int) -> Optional[bool]: if self._client.http.set_ttl(ttl / 2.0): # Consul multiplies the TTL by 2x self._session = None self.__do_not_watch = True return None @property def ttl(self) -> int: return self._client.http.ttl * 2 # we multiply the value by 2 because it was divided in the `set_ttl()` method def set_retry_timeout(self, retry_timeout: int) -> None: self._retry.deadline = retry_timeout self._client.http.set_read_timeout(retry_timeout) def adjust_ttl(self) -> None: try: settings = self._client.agent.self() min_ttl = (settings['Config']['SessionTTLMin'] or 10000000000) / 1000000000.0 logger.warning('Changing Session TTL from %s to %s', self._client.http.ttl, min_ttl) self._client.http.set_ttl(min_ttl) except Exception: logger.exception('adjust_ttl') def _do_refresh_session(self, force: bool = False) -> bool: """:returns: `!True` if it had to create new session""" if not force and self._session and self._last_session_refresh + self._loop_wait > time.time(): return False if 
self._session: try: self._client.session.renew(self._session) except NotFound: self._session = None ret = not self._session if ret: try: self._session = self._client.session.create(name=self._scope + '-' + self._name, checks=self.__session_checks, lock_delay=0.001, behavior='delete') except InvalidSessionTTL: logger.exception('session.create') self.adjust_ttl() raise self._last_session_refresh = time.time() return ret def refresh_session(self) -> bool: try: return self.retry(self._do_refresh_session) except (ConsulException, RetryFailedError): logger.exception('refresh_session') raise ConsulError('Failed to renew/create session') @staticmethod def member(node: Dict[str, str]) -> Member: return Member.from_node(node['ModifyIndex'], os.path.basename(node['Key']), node.get('Session'), node['Value']) def _cluster_from_nodes(self, nodes: Dict[str, Any]) -> Cluster: # get initialize flag initialize = nodes.get(self._INITIALIZE) initialize = initialize and initialize['Value'] # get global dynamic configuration config = nodes.get(self._CONFIG) config = config and ClusterConfig.from_node(config['ModifyIndex'], config['Value']) # get timeline history history = nodes.get(self._HISTORY) history = history and TimelineHistory.from_node(history['ModifyIndex'], history['Value']) # get last known leader lsn and slots status = nodes.get(self._STATUS) or nodes.get(self._LEADER_OPTIME) status = Status.from_node(status and status['Value']) # get list of members members = [self.member(n) for k, n in nodes.items() if k.startswith(self._MEMBERS) and k.count('/') == 1] # get leader leader = nodes.get(self._LEADER) if leader: member = Member(-1, leader['Value'], None, {}) member = ([m for m in members if m.name == leader['Value']] or [member])[0] leader = Leader(leader['ModifyIndex'], leader.get('Session'), member) # failover key failover = nodes.get(self._FAILOVER) if failover: failover = Failover.from_node(failover['ModifyIndex'], failover['Value']) # get synchronization state sync = 
nodes.get(self._SYNC) sync = SyncState.from_node(sync and sync['ModifyIndex'], sync and sync['Value']) # get failsafe topology failsafe = nodes.get(self._FAILSAFE) try: failsafe = json.loads(failsafe['Value']) if failsafe else None except Exception: failsafe = None return Cluster(initialize, config, leader, status, members, failover, sync, history, failsafe) @property def _consistency(self) -> str: return 'consistent' if self._ctl else self._client.consistency def _cluster_loader(self, path: str) -> Cluster: _, results = self.retry(self._client.kv.get, path, recurse=True, consistency=self._consistency) if results is None: return Cluster.empty() nodes: Dict[str, Dict[str, Any]] = {} for node in results: node['Value'] = (node['Value'] or b'').decode('utf-8') nodes[node['Key'][len(path):]] = node return self._cluster_from_nodes(nodes) def _citus_cluster_loader(self, path: str) -> Dict[int, Cluster]: _, results = self.retry(self._client.kv.get, path, recurse=True, consistency=self._consistency) clusters: Dict[int, Dict[str, Cluster]] = defaultdict(dict) for node in results or []: key = node['Key'][len(path):].split('/', 1) if len(key) == 2 and citus_group_re.match(key[0]): node['Value'] = (node['Value'] or b'').decode('utf-8') clusters[int(key[0])][key[1]] = node return {group: self._cluster_from_nodes(nodes) for group, nodes in clusters.items()} def _load_cluster( self, path: str, loader: Callable[[str], Union[Cluster, Dict[int, Cluster]]] ) -> Union[Cluster, Dict[int, Cluster]]: try: return loader(path) except Exception: logger.exception('get_cluster') raise ConsulError('Consul is not responding properly') @catch_consul_errors def touch_member(self, data: Dict[str, Any]) -> bool: cluster = self.cluster member = cluster and cluster.get_member(self._name, fallback_to_leader=False) try: create_member = self.refresh_session() except DCSError: return False if member and (create_member or member.session != self._session): self._client.kv.delete(self.member_path) 
create_member = True if self._register_service or self._previous_loop_register_service: try: self.update_service(not create_member and member and member.data or {}, data) except Exception: logger.exception('update_service') if not create_member and member and deep_compare(data, member.data): return True try: self._client.kv.put(self.member_path, json.dumps(data, separators=(',', ':')), acquire=self._session) return True except InvalidSession: self._session = None logger.error('Our session disappeared from Consul, can not "touch_member"') except Exception: logger.exception('touch_member') return False def _set_service_name(self) -> None: self._service_name = service_name_from_scope_name(self._scope) if self._scope != self._service_name: logger.warning('Using %s as consul service name instead of scope name %s', self._service_name, self._scope) @catch_consul_errors def register_service(self, service_name: str, **kwargs: Any) -> bool: logger.info('Register service %s, params %s', service_name, kwargs) return self._client.agent.service.register(service_name, **kwargs) @catch_consul_errors def deregister_service(self, service_id: str) -> bool: logger.info('Deregister service %s', service_id) # service_id can contain special characters, but is used as part of uri in deregister request service_id = quote(service_id) return self._client.agent.service.deregister(service_id) def _update_service(self, data: Dict[str, Any]) -> Optional[bool]: service_name = self._service_name role = data['role'].replace('_', '-') state = data['state'] api_url: str = data['api_url'] api_parts = urlparse(api_url) api_parts = api_parts._replace(path='/{0}'.format(role)) conn_url: str = data['conn_url'] conn_parts = urlparse(conn_url) check = base.Check.http(api_parts.geturl(), self._service_check_interval, deregister='{0}s'.format(self._client.http.ttl * 10)) if self._service_check_tls_server_name is not None: check['TLSServerName'] = self._service_check_tls_server_name tags = 
self._service_tags[:] tags.append(role) if role == 'master': tags.append('primary') elif role == 'primary': tags.append('master') self._previous_loop_service_tags = self._service_tags self._previous_loop_token = self._client.token params = { 'service_id': '{0}/{1}'.format(self._scope, self._name), 'address': conn_parts.hostname, 'port': conn_parts.port, 'check': check, 'tags': tags, 'enable_tag_override': True, } if state == 'stopped' or (not self._register_service and self._previous_loop_register_service): self._previous_loop_register_service = self._register_service return self.deregister_service(params['service_id']) self._previous_loop_register_service = self._register_service if role in ['master', 'primary', 'replica', 'standby-leader']: if state != 'running': return return self.register_service(service_name, **params) logger.warning('Could not register service: unknown role type %s', role) @force_if_last_failed def update_service(self, old_data: Dict[str, Any], new_data: Dict[str, Any], force: bool = False) -> Optional[bool]: update = False for key in ['role', 'api_url', 'conn_url', 'state']: if key not in new_data: logger.warning('Could not register service: not enough params in member data') return if old_data.get(key) != new_data[key]: update = True if ( force or update or self._register_service != self._previous_loop_register_service or self._service_tags != self._previous_loop_service_tags or self._client.token != self._previous_loop_token ): return self._update_service(new_data) def _do_attempt_to_acquire_leader(self, retry: Retry) -> bool: try: return retry(self._client.kv.put, self.leader_path, self._name, acquire=self._session) except InvalidSession: logger.error('Our session disappeared from Consul. 
Will try to get a new one and retry attempt') self._session = None retry.ensure_deadline(0) retry(self._do_refresh_session) retry.ensure_deadline(1, ConsulError('_do_attempt_to_acquire_leader timeout')) return retry(self._client.kv.put, self.leader_path, self._name, acquire=self._session) @catch_return_false_exception def attempt_to_acquire_leader(self) -> bool: retry = self._retry.copy() self._run_and_handle_exceptions(self._do_refresh_session, retry=retry) retry.ensure_deadline(1, ConsulError('attempt_to_acquire_leader timeout')) ret = self._run_and_handle_exceptions(self._do_attempt_to_acquire_leader, retry, retry=None) if not ret: logger.info('Could not take out TTL lock') return ret def take_leader(self) -> bool: return self.attempt_to_acquire_leader() @catch_consul_errors def set_failover_value(self, value: str, version: Optional[int] = None) -> bool: return self._client.kv.put(self.failover_path, value, cas=version) @catch_consul_errors def set_config_value(self, value: str, version: Optional[int] = None) -> bool: return self._client.kv.put(self.config_path, value, cas=version) @catch_consul_errors def _write_leader_optime(self, last_lsn: str) -> bool: return self._client.kv.put(self.leader_optime_path, last_lsn) @catch_consul_errors def _write_status(self, value: str) -> bool: return self._client.kv.put(self.status_path, value) @catch_consul_errors def _write_failsafe(self, value: str) -> bool: return self._client.kv.put(self.failsafe_path, value) @staticmethod def _run_and_handle_exceptions(method: Callable[..., Any], *args: Any, **kwargs: Any) -> Any: retry = kwargs.pop('retry', None) try: return retry(method, *args, **kwargs) if retry else method(*args, **kwargs) except (RetryFailedError, InvalidSession, HTTPException, HTTPError, socket.error, socket.timeout) as e: raise ConsulError(e) except ConsulException: raise ReturnFalseException @catch_return_false_exception def _update_leader(self, leader: Leader) -> bool: retry = self._retry.copy() 
self._run_and_handle_exceptions(self._do_refresh_session, True, retry=retry) if self._session and leader.session != self._session: retry.ensure_deadline(1, ConsulError('update_leader timeout')) logger.warning('Recreating the leader key due to session mismatch') self._run_and_handle_exceptions(self._client.kv.delete, self.leader_path, cas=leader.version) retry.ensure_deadline(0.5, ConsulError('update_leader timeout')) self._run_and_handle_exceptions(self._client.kv.put, self.leader_path, self._name, acquire=self._session) return bool(self._session) @catch_consul_errors def initialize(self, create_new: bool = True, sysid: str = '') -> bool: kwargs = {'cas': 0} if create_new else {} return self.retry(self._client.kv.put, self.initialize_path, sysid, **kwargs) @catch_consul_errors def cancel_initialization(self) -> bool: return self.retry(self._client.kv.delete, self.initialize_path) @catch_consul_errors def delete_cluster(self) -> bool: return self.retry(self._client.kv.delete, self.client_path(''), recurse=True) @catch_consul_errors def set_history_value(self, value: str) -> bool: return self._client.kv.put(self.history_path, value) @catch_consul_errors def _delete_leader(self, leader: Leader) -> bool: return self._client.kv.delete(self.leader_path, cas=int(leader.version)) @catch_consul_errors def set_sync_state_value(self, value: str, version: Optional[int] = None) -> Union[int, bool]: retry = self._retry.copy() ret = retry(self._client.kv.put, self.sync_path, value, cas=version) if ret: # We have no other choise, only read after write :( if not retry.ensure_deadline(0.5): return False _, ret = self.retry(self._client.kv.get, self.sync_path, consistency='consistent') if ret and (ret.get('Value') or b'').decode('utf-8') == value: return ret['ModifyIndex'] return False @catch_consul_errors def delete_sync_state(self, version: Optional[int] = None) -> bool: return self.retry(self._client.kv.delete, self.sync_path, cas=version) def watch(self, leader_version: 
Optional[int], timeout: float) -> bool: self._last_session_refresh = 0 if self.__do_not_watch: self.__do_not_watch = False return True if leader_version: end_time = time.time() + timeout while timeout >= 1: try: idx, _ = self._client.kv.get(self.leader_path, index=leader_version, wait=str(timeout) + 's') return str(idx) != str(leader_version) except (ConsulException, HTTPException, HTTPError, socket.error, socket.timeout): logger.exception('watch') timeout = end_time - time.time() try: return super(Consul, self).watch(None, timeout) finally: self.event.clear() patroni-3.2.2/patroni/dcs/etcd.py000066400000000000000000001121061455170150700167160ustar00rootroot00000000000000from __future__ import absolute_import import abc import etcd import json import logging import os import urllib3.util.connection import random import socket import time from collections import defaultdict from copy import deepcopy from dns.exception import DNSException from dns import resolver from http.client import HTTPException from queue import Queue from threading import Thread from typing import Any, Callable, Collection, Dict, List, Optional, Union, Tuple, Type, TYPE_CHECKING from urllib.parse import urlparse from urllib3 import Timeout from urllib3.exceptions import HTTPError, ReadTimeoutError, ProtocolError from . 
import AbstractDCS, Cluster, ClusterConfig, Failover, Leader, Member, Status, SyncState, \ TimelineHistory, ReturnFalseException, catch_return_false_exception, citus_group_re from ..exceptions import DCSError from ..request import get as requests_get from ..utils import Retry, RetryFailedError, split_host_port, uri, USER_AGENT if TYPE_CHECKING: # pragma: no cover from ..config import Config logger = logging.getLogger(__name__) class EtcdRaftInternal(etcd.EtcdException): """Raft Internal Error""" class EtcdError(DCSError): pass _AddrInfo = Tuple[socket.AddressFamily, socket.SocketKind, int, str, Union[Tuple[str, int], Tuple[str, int, int, int]]] class DnsCachingResolver(Thread): def __init__(self, cache_time: float = 600.0, cache_fail_time: float = 30.0) -> None: super(DnsCachingResolver, self).__init__() self._cache: Dict[Tuple[str, int], Tuple[float, List[_AddrInfo]]] = {} self._cache_time = cache_time self._cache_fail_time = cache_fail_time self._resolve_queue: Queue[Tuple[Tuple[str, int], int]] = Queue() self.daemon = True self.start() def run(self) -> None: while True: (host, port), attempt = self._resolve_queue.get() response = self._do_resolve(host, port) if response: self._cache[(host, port)] = (time.time(), response) else: if attempt < 10: self.resolve_async(host, port, attempt + 1) time.sleep(1) def resolve(self, host: str, port: int) -> List[_AddrInfo]: current_time = time.time() cached_time, response = self._cache.get((host, port), (0, [])) time_passed = current_time - cached_time if time_passed > self._cache_time or (not response and time_passed > self._cache_fail_time): new_response = self._do_resolve(host, port) if new_response: self._cache[(host, port)] = (current_time, new_response) response = new_response return response def resolve_async(self, host: str, port: int, attempt: int = 0) -> None: self._resolve_queue.put(((host, port), attempt)) def remove(self, host: str, port: int) -> None: self._cache.pop((host, port), None) @staticmethod def 
_do_resolve(host: str, port: int) -> List[_AddrInfo]: try: return socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP) except Exception as e: logger.warning('failed to resolve host %s: %s', host, e) return [] class AbstractEtcdClientWithFailover(abc.ABC, etcd.Client): ERROR_CLS: Type[Exception] def __init__(self, config: Dict[str, Any], dns_resolver: DnsCachingResolver, cache_ttl: int = 300) -> None: self._dns_resolver = dns_resolver self.set_machines_cache_ttl(cache_ttl) self._machines_cache_updated = 0 kwargs = {p: config.get(p) for p in ('host', 'port', 'protocol', 'use_proxies', 'version_prefix', 'username', 'password', 'cert', 'ca_cert') if config.get(p)} super(AbstractEtcdClientWithFailover, self).__init__(read_timeout=config['retry_timeout'], **kwargs) # For some reason python3-etcd on debian and ubuntu are not based on the latest version # Workaround for the case when https://github.com/jplana/python-etcd/pull/196 is not applied self.http.connection_pool_kw.pop('ssl_version', None) self._config = config self._load_machines_cache() self._allow_reconnect = True # allow passing retry argument to api_execute in params self._comparison_conditions.add('retry') self._read_options.add('retry') self._del_conditions.add('retry') def _calculate_timeouts(self, etcd_nodes: int, timeout: Optional[float] = None) -> Tuple[int, float, int]: """Calculate a request timeout and number of retries per single etcd node. In case if the timeout per node is too small (less than one second) we will reduce the number of nodes. For the cluster with only one node we will try to do 2 retries. For clusters with 2 nodes we will try to do 1 retry for every node. No retries for clusters with 3 or more nodes. 
We better rely on switching to a different node.""" per_node_timeout = timeout = float(timeout or self.read_timeout) max_retries = 4 - min(etcd_nodes, 3) per_node_retries = 1 min_timeout = 1.0 while etcd_nodes > 0: per_node_timeout = float(timeout) / etcd_nodes if per_node_timeout >= min_timeout: # for small clusters we will try to do more than on try on every node while per_node_retries < max_retries and per_node_timeout / (per_node_retries + 1) >= min_timeout: per_node_retries += 1 per_node_timeout /= per_node_retries break # if the timeout per one node is to small try to reduce number of nodes etcd_nodes -= 1 max_retries = 1 return etcd_nodes, per_node_timeout, per_node_retries - 1 def reload_config(self, config: Dict[str, Any]) -> None: self.username = config.get('username') self.password = config.get('password') def _get_headers(self) -> Dict[str, str]: basic_auth = ':'.join((self.username, self.password)) if self.username and self.password else None return urllib3.make_headers(basic_auth=basic_auth, user_agent=USER_AGENT) def _prepare_common_parameters(self, etcd_nodes: int, timeout: Optional[float] = None) -> Dict[str, Any]: kwargs: Dict[str, Any] = {'headers': self._get_headers(), 'redirect': self.allow_redirect, 'preload_content': False} if timeout is not None: kwargs.update(retries=0, timeout=timeout) else: _, per_node_timeout, per_node_retries = self._calculate_timeouts(etcd_nodes) connect_timeout = max(1.0, per_node_timeout / 2.0) kwargs.update(timeout=Timeout(connect=connect_timeout, total=per_node_timeout), retries=per_node_retries) return kwargs def set_machines_cache_ttl(self, cache_ttl: int) -> None: self._machines_cache_ttl = cache_ttl @abc.abstractmethod def _prepare_get_members(self, etcd_nodes: int) -> Dict[str, Any]: """returns: request parameters""" @abc.abstractmethod def _get_members(self, base_uri: str, **kwargs: Any) -> List[str]: """returns: list of clientURLs""" @property def machines_cache(self) -> List[str]: base_uri, cache = 
self._base_uri, self._machines_cache return ([base_uri] if base_uri in cache else []) + [machine for machine in cache if machine != base_uri] def _get_machines_list(self, machines_cache: List[str]) -> List[str]: """Gets list of members from Etcd cluster using API :param machines_cache: initial list of Etcd members :returns: list of clientURLs retrieved from Etcd cluster :raises EtcdConnectionFailed: if failed""" kwargs = self._prepare_get_members(len(machines_cache)) for base_uri in machines_cache: try: machines = list(set(self._get_members(base_uri, **kwargs))) logger.debug("Retrieved list of machines: %s", machines) if machines: random.shuffle(machines) if not self._use_proxies: self._update_dns_cache(self._dns_resolver.resolve_async, machines) return machines except Exception as e: self.http.clear() logger.error("Failed to get list of machines from %s%s: %r", base_uri, self.version_prefix, e) raise etcd.EtcdConnectionFailed('No more machines in the cluster') @property def machines(self) -> List[str]: """Original `machines` method(property) of `etcd.Client` class raise exception when it failed to get list of etcd cluster members. This method is being called only when request failed on one of the etcd members during `api_execute` call. For us it's more important to execute original request rather then get new topology of etcd cluster. So we will catch this exception and return empty list of machines. Later, during next `api_execute` call we will forcefully update machines_cache. Also this method implements the same timeout-retry logic as `api_execute`, because the original method was retrying 2 times with the `read_timeout` on each node. 
After the next refactoring the whole logic was moved to the _get_machines_list() method.""" return self._get_machines_list(self.machines_cache) def set_read_timeout(self, timeout: float) -> None: self._read_timeout = timeout def _do_http_request(self, retry: Optional[Retry], machines_cache: List[str], request_executor: Callable[..., urllib3.response.HTTPResponse], method: str, path: str, fields: Optional[Dict[str, Any]] = None, **kwargs: Any) -> urllib3.response.HTTPResponse: is_watch_request = isinstance(fields, dict) and fields.get('wait') == 'true' if fields is not None: kwargs['fields'] = fields some_request_failed = False for i, base_uri in enumerate(machines_cache): if i > 0: logger.info("Retrying on %s", base_uri) try: response = request_executor(method, base_uri + path, **kwargs) response.data.decode('utf-8') if some_request_failed: self.set_base_uri(base_uri) self._refresh_machines_cache() return response except (HTTPError, HTTPException, socket.error, socket.timeout) as e: self.http.clear() if not retry: if len(machines_cache) == 1: self.set_base_uri(self._base_uri) # trigger Etcd3 watcher restart # switch to the next etcd node because we don't know exactly what happened, # whether the key didn't received an update or there is a network problem. 
elif i + 1 < len(machines_cache): self.set_base_uri(machines_cache[i + 1]) if is_watch_request and isinstance(e, (ReadTimeoutError, ProtocolError)): logger.debug("Watch timed out.") raise etcd.EtcdWatchTimedOut("Watch timed out: {0}".format(e), cause=e) logger.error("Request to server %s failed: %r", base_uri, e) logger.info("Reconnection allowed, looking for another server.") if not retry: raise etcd.EtcdException('{0} {1} request failed'.format(method, path)) some_request_failed = True raise etcd.EtcdConnectionFailed('No more machines in the cluster') @abc.abstractmethod def _prepare_request(self, kwargs: Dict[str, Any], params: Optional[Dict[str, Any]] = None, method: Optional[str] = None) -> Callable[..., urllib3.response.HTTPResponse]: """returns: request_executor""" def api_execute(self, path: str, method: str, params: Optional[Dict[str, Any]] = None, timeout: Optional[float] = None) -> Any: retry = params.pop('retry', None) if isinstance(params, dict) else None # Update machines_cache if previous attempt of update has failed if self._update_machines_cache: self._load_machines_cache() elif not self._use_proxies and time.time() - self._machines_cache_updated > self._machines_cache_ttl: self._refresh_machines_cache() machines_cache = self.machines_cache etcd_nodes = len(machines_cache) kwargs = self._prepare_common_parameters(etcd_nodes, timeout) request_executor = self._prepare_request(kwargs, params, method) while True: try: response = self._do_http_request(retry, machines_cache, request_executor, method, path, **kwargs) return self._handle_server_response(response) except etcd.EtcdWatchTimedOut: raise except etcd.EtcdConnectionFailed as ex: try: if self._load_machines_cache(): machines_cache = self.machines_cache etcd_nodes = len(machines_cache) except Exception as e: logger.debug('Failed to update list of etcd nodes: %r', e) if TYPE_CHECKING: # pragma: no cover assert isinstance(retry, Retry) # etcd.EtcdConnectionFailed is raised only if retry is not None! 
sleeptime = retry.sleeptime remaining_time = retry.stoptime - sleeptime - time.time() nodes, timeout, retries = self._calculate_timeouts(etcd_nodes, remaining_time) if nodes == 0: self._update_machines_cache = True self.set_base_uri(self._base_uri) # trigger Etcd3 watcher restart raise ex retry.sleep_func(sleeptime) retry.update_delay() # We still have some time left. Partially reduce `machines_cache` and retry request kwargs.update(timeout=Timeout(connect=max(1.0, timeout / 2.0), total=timeout), retries=retries) machines_cache = machines_cache[:nodes] @staticmethod def get_srv_record(host: str) -> List[Tuple[str, int]]: try: return [(r.target.to_text(True), r.port) for r in resolver.query(host, 'SRV')] except DNSException: return [] def _get_machines_cache_from_srv(self, srv: str, srv_suffix: Optional[str] = None) -> List[str]: """Fetch list of etcd-cluster member by resolving _etcd-server._tcp. SRV record. This record should contain list of host and peer ports which could be used to run 'GET http://{host}:{port}/members' request (peer protocol)""" ret: List[str] = [] for r in ['-client-ssl', '-client', '-ssl', '', '-server-ssl', '-server']: r = '{0}-{1}'.format(r, srv_suffix) if srv_suffix else r protocol = 'https' if '-ssl' in r else 'http' endpoint = '/members' if '-server' in r else '' for host, port in self.get_srv_record('_etcd{0}._tcp.{1}'.format(r, srv)): url = uri(protocol, (host, port), endpoint) if endpoint: try: response = requests_get(url, timeout=self.read_timeout, verify=False) if response.status < 400: for member in json.loads(response.data.decode('utf-8')): ret.extend(member['clientURLs']) break except Exception: logger.exception('GET %s', url) else: ret.append(url) if ret: self._protocol = protocol break else: logger.warning('Can not resolve SRV for %s', srv) return list(set(ret)) def _get_machines_cache_from_dns(self, host: str, port: int) -> List[str]: """One host might be resolved into multiple ip addresses. 
We will make list out of it""" if self.protocol == 'http': ret = [uri(self.protocol, res[-1][:2]) for res in self._dns_resolver.resolve(host, port)] if ret: return list(set(ret)) return [uri(self.protocol, (host, port))] def _get_machines_cache_from_config(self) -> List[str]: if 'proxy' in self._config: return [uri(self.protocol, (self._config['host'], self._config['port']))] machines_cache = [] if 'srv' in self._config: machines_cache = self._get_machines_cache_from_srv(self._config['srv'], self._config.get('srv_suffix')) if not machines_cache and 'hosts' in self._config: machines_cache = list(self._config['hosts']) if not machines_cache and 'host' in self._config: machines_cache = self._get_machines_cache_from_dns(self._config['host'], self._config['port']) return machines_cache @staticmethod def _update_dns_cache(func: Callable[[str, int], None], machines: List[str]) -> None: for url in machines: r = urlparse(url) if r.hostname: port = r.port or (443 if r.scheme == 'https' else 80) func(r.hostname, port) def _load_machines_cache(self) -> bool: """This method should fill up `_machines_cache` from scratch. It could happen only in two cases: 1. During class initialization 2. 
When all etcd members failed""" self._update_machines_cache = True if 'srv' not in self._config and 'host' not in self._config and 'hosts' not in self._config: raise Exception('Neither srv, hosts, host nor url are defined in etcd section of config') machines_cache = self._get_machines_cache_from_config() # Can not bootstrap list of etcd-cluster members, giving up if not machines_cache: raise etcd.EtcdException # enforce resolving dns name,they might get new ips self._update_dns_cache(self._dns_resolver.remove, machines_cache) # after filling up the initial list of machines_cache we should ask etcd-cluster about actual list ret = self._refresh_machines_cache(machines_cache) self._update_machines_cache = False return ret def _refresh_machines_cache(self, machines_cache: Optional[List[str]] = None) -> bool: """Get etcd cluster topology using Etcd API and put it to self._machines_cache :param machines_cache: the list of nodes we want to run through executing API request in addition to values stored in the self._machines_cache :returns: `True` if self._machines_cache was updated with new values :raises EtcdException: if failed to get topology and `machines_cache` was specified. 
The self._machines_cache will not be updated if nodes from the list are not accessible or if they are not returning correct results.""" if self._use_proxies: value = self._get_machines_cache_from_config() else: try: # we want to go through the list obtained from the config file + last known health topology value = self._get_machines_list(list(set((machines_cache or []) + self.machines_cache))) except etcd.EtcdConnectionFailed: value = [] if value: ret = set(self._machines_cache) != set(value) self._machines_cache = value elif machines_cache: # we are just starting or all nodes were not available at some point raise etcd.EtcdException("Could not get the list of servers, " "maybe you provided the wrong " "host(s) to connect to?") else: return False if self._base_uri not in self._machines_cache: self.set_base_uri(self._machines_cache[0]) self._machines_cache_updated = time.time() return ret def set_base_uri(self, value: str) -> None: if self._base_uri != value: logger.info('Selected new etcd server %s', value) self._base_uri = value class EtcdClient(AbstractEtcdClientWithFailover): ERROR_CLS = EtcdError def __init__(self, config: Dict[str, Any], dns_resolver: DnsCachingResolver, cache_ttl: int = 300) -> None: super(EtcdClient, self).__init__({**config, 'version_prefix': None}, dns_resolver, cache_ttl) def __del__(self) -> None: try: self.http.clear() except (ReferenceError, TypeError, AttributeError): pass def _prepare_get_members(self, etcd_nodes: int) -> Dict[str, Any]: return self._prepare_common_parameters(etcd_nodes) def _get_members(self, base_uri: str, **kwargs: Any) -> List[str]: response = self.http.request(self._MGET, base_uri + self.version_prefix + '/machines', **kwargs) data = self._handle_server_response(response).data.decode('utf-8') return [m.strip() for m in data.split(',') if m.strip()] def _prepare_request(self, kwargs: Dict[str, Any], params: Optional[Dict[str, Any]] = None, method: Optional[str] = None) -> Callable[..., 
urllib3.response.HTTPResponse]: kwargs['fields'] = params if method in (self._MPOST, self._MPUT): kwargs['encode_multipart'] = False return self.http.request class AbstractEtcd(AbstractDCS): def __init__(self, config: Dict[str, Any], client_cls: Type[AbstractEtcdClientWithFailover], retry_errors_cls: Union[Type[Exception], Tuple[Type[Exception], ...]]) -> None: super(AbstractEtcd, self).__init__(config) self._retry = Retry(deadline=config['retry_timeout'], max_delay=1, max_tries=-1, retry_exceptions=retry_errors_cls) self._ttl = int(config.get('ttl') or 30) self._abstract_client = self.get_etcd_client(config, client_cls) self.__do_not_watch = False self._has_failed = False @property @abc.abstractmethod def _client(self) -> AbstractEtcdClientWithFailover: """return correct type of etcd client""" def reload_config(self, config: Union['Config', Dict[str, Any]]) -> None: super(AbstractEtcd, self).reload_config(config) self._client.reload_config(config.get(self.__class__.__name__.lower(), {})) def retry(self, method: Callable[..., Any], *args: Any, **kwargs: Any) -> Any: retry = self._retry.copy() kwargs['retry'] = retry return retry(method, *args, **kwargs) def _handle_exception(self, e: Exception, name: str = '', do_sleep: bool = False, raise_ex: Optional[Exception] = None) -> None: if not self._has_failed: logger.exception(name) else: logger.error(e) if do_sleep: time.sleep(1) self._has_failed = True if isinstance(raise_ex, Exception): raise raise_ex def handle_etcd_exceptions(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any: try: retval = func(self, *args, **kwargs) self._has_failed = False return retval except (RetryFailedError, etcd.EtcdException) as e: self._handle_exception(e) return False except Exception as e: self._handle_exception(e, raise_ex=self._client.ERROR_CLS('unexpected error')) def _run_and_handle_exceptions(self, method: Callable[..., Any], *args: Any, **kwargs: Any) -> Any: retry = kwargs.pop('retry', self.retry) try: return 
retry(method, *args, **kwargs) if retry else method(*args, **kwargs) except (RetryFailedError, etcd.EtcdConnectionFailed) as e: raise self._client.ERROR_CLS(e) except etcd.EtcdException as e: self._handle_exception(e) raise ReturnFalseException except Exception as e: self._handle_exception(e, raise_ex=self._client.ERROR_CLS('unexpected error')) def set_socket_options(self, sock: socket.socket, socket_options: Optional[Collection[Tuple[int, int, int]]]) -> None: if socket_options: for opt in socket_options: sock.setsockopt(*opt) def get_etcd_client(self, config: Dict[str, Any], client_cls: Type[AbstractEtcdClientWithFailover]) -> AbstractEtcdClientWithFailover: config = deepcopy(config) if 'proxy' in config: config['use_proxies'] = True config['url'] = config['proxy'] if 'url' in config and isinstance(config['url'], str): r = urlparse(config['url']) config.update({'protocol': r.scheme, 'host': r.hostname, 'port': r.port or 2379, 'username': r.username, 'password': r.password}) elif 'hosts' in config: hosts = config.pop('hosts') default_port = config.pop('port', 2379) protocol = config.get('protocol', 'http') if isinstance(hosts, str): hosts = hosts.split(',') config_hosts: List[str] = [] for value in hosts: if isinstance(value, str): config_hosts.append(uri(protocol, split_host_port(value.strip(), default_port))) config['hosts'] = config_hosts elif 'host' in config: host, port = split_host_port(config['host'], 2379) config['host'] = host if 'port' not in config: config['port'] = int(port) if config.get('cacert'): config['ca_cert'] = config.pop('cacert') if config.get('key') and config.get('cert'): config['cert'] = (config['cert'], config['key']) for p in ('discovery_srv', 'srv_domain'): if p in config: config['srv'] = config.pop(p) dns_resolver = DnsCachingResolver() def create_connection_patched( address: Tuple[str, int], timeout: Any = object(), source_address: Optional[Any] = None, socket_options: Optional[Collection[Tuple[int, int, int]]] = None ) -> 
socket.socket: host, port = address if host.startswith('['): host = host.strip('[]') err = None for af, socktype, proto, _, sa in dns_resolver.resolve(host, port): sock = None try: sock = socket.socket(af, socktype, proto) self.set_socket_options(sock, socket_options) if timeout is None or isinstance(timeout, (float, int)): sock.settimeout(timeout) if source_address: sock.bind(source_address) sock.connect(sa) return sock except socket.error as e: err = e if sock is not None: sock.close() sock = None if err is not None: raise err raise socket.error("getaddrinfo returns an empty list") urllib3.util.connection.create_connection = create_connection_patched client = None while not client: try: client = client_cls(config, dns_resolver) if 'use_proxies' in config and not client.machines: raise etcd.EtcdException except etcd.EtcdException: logger.info('waiting on etcd') time.sleep(5) return client def set_ttl(self, ttl: int) -> Optional[bool]: ttl = int(ttl) ret = self._ttl != ttl self._ttl = ttl self._client.set_machines_cache_ttl(ttl * 10) return ret @property def ttl(self) -> int: return self._ttl def set_retry_timeout(self, retry_timeout: int) -> None: self._retry.deadline = retry_timeout self._client.set_read_timeout(retry_timeout) def catch_etcd_errors(func: Callable[..., Any]) -> Any: def wrapper(self: AbstractEtcd, *args: Any, **kwargs: Any) -> Any: return self.handle_etcd_exceptions(func, *args, **kwargs) return wrapper class Etcd(AbstractEtcd): def __init__(self, config: Dict[str, Any]) -> None: super(Etcd, self).__init__(config, EtcdClient, (etcd.EtcdLeaderElectionInProgress, EtcdRaftInternal)) self.__do_not_watch = False @property def _client(self) -> EtcdClient: if TYPE_CHECKING: # pragma: no cover assert isinstance(self._abstract_client, EtcdClient) return self._abstract_client def set_ttl(self, ttl: int) -> Optional[bool]: self.__do_not_watch = super(Etcd, self).set_ttl(ttl) return None @staticmethod def member(node: etcd.EtcdResult) -> Member: return 
Member.from_node(node.modifiedIndex, os.path.basename(node.key), node.ttl, node.value) def _cluster_from_nodes(self, etcd_index: int, nodes: Dict[str, etcd.EtcdResult]) -> Cluster: # get initialize flag initialize = nodes.get(self._INITIALIZE) initialize = initialize and initialize.value # get global dynamic configuration config = nodes.get(self._CONFIG) config = config and ClusterConfig.from_node(config.modifiedIndex, config.value) # get timeline history history = nodes.get(self._HISTORY) history = history and TimelineHistory.from_node(history.modifiedIndex, history.value) # get last know leader lsn and slots status = nodes.get(self._STATUS) or nodes.get(self._LEADER_OPTIME) status = Status.from_node(status and status.value) # get list of members members = [self.member(n) for k, n in nodes.items() if k.startswith(self._MEMBERS) and k.count('/') == 1] # get leader leader = nodes.get(self._LEADER) if leader: member = Member(-1, leader.value, None, {}) member = ([m for m in members if m.name == leader.value] or [member])[0] version = etcd_index if etcd_index > leader.modifiedIndex else leader.modifiedIndex + 1 leader = Leader(version, leader.ttl, member) # failover key failover = nodes.get(self._FAILOVER) if failover: failover = Failover.from_node(failover.modifiedIndex, failover.value) # get synchronization state sync = nodes.get(self._SYNC) sync = SyncState.from_node(sync and sync.modifiedIndex, sync and sync.value) # get failsafe topology failsafe = nodes.get(self._FAILSAFE) try: failsafe = json.loads(failsafe.value) if failsafe else None except Exception: failsafe = None return Cluster(initialize, config, leader, status, members, failover, sync, history, failsafe) def _cluster_loader(self, path: str) -> Cluster: try: result = self.retry(self._client.read, path, recursive=True, quorum=self._ctl) except etcd.EtcdKeyNotFound: return Cluster.empty() nodes = {node.key[len(result.key):].lstrip('/'): node for node in result.leaves} return 
self._cluster_from_nodes(result.etcd_index, nodes) def _citus_cluster_loader(self, path: str) -> Dict[int, Cluster]: try: result = self.retry(self._client.read, path, recursive=True, quorum=self._ctl) except etcd.EtcdKeyNotFound: return {} clusters: Dict[int, Dict[str, etcd.EtcdResult]] = defaultdict(dict) for node in result.leaves: key = node.key[len(result.key):].lstrip('/').split('/', 1) if len(key) == 2 and citus_group_re.match(key[0]): clusters[int(key[0])][key[1]] = node return {group: self._cluster_from_nodes(result.etcd_index, nodes) for group, nodes in clusters.items()} def _load_cluster( self, path: str, loader: Callable[[str], Union[Cluster, Dict[int, Cluster]]] ) -> Union[Cluster, Dict[int, Cluster]]: cluster = None try: cluster = loader(path) except Exception as e: self._handle_exception(e, 'get_cluster', raise_ex=EtcdError('Etcd is not responding properly')) self._has_failed = False if TYPE_CHECKING: # pragma: no cover assert cluster is not None return cluster @catch_etcd_errors def touch_member(self, data: Dict[str, Any]) -> bool: value = json.dumps(data, separators=(',', ':')) return bool(self._client.set(self.member_path, value, self._ttl)) @catch_etcd_errors def take_leader(self) -> bool: return self.retry(self._client.write, self.leader_path, self._name, ttl=self._ttl) def _do_attempt_to_acquire_leader(self) -> bool: try: return bool(self.retry(self._client.write, self.leader_path, self._name, ttl=self._ttl, prevExist=False)) except etcd.EtcdAlreadyExist: logger.info('Could not take out TTL lock') return False @catch_return_false_exception def attempt_to_acquire_leader(self) -> bool: return self._run_and_handle_exceptions(self._do_attempt_to_acquire_leader, retry=None) @catch_etcd_errors def set_failover_value(self, value: str, version: Optional[int] = None) -> bool: return bool(self._client.write(self.failover_path, value, prevIndex=version or 0)) @catch_etcd_errors def set_config_value(self, value: str, version: Optional[int] = None) -> bool: 
return bool(self._client.write(self.config_path, value, prevIndex=version or 0)) @catch_etcd_errors def _write_leader_optime(self, last_lsn: str) -> bool: return bool(self._client.set(self.leader_optime_path, last_lsn)) @catch_etcd_errors def _write_status(self, value: str) -> bool: return bool(self._client.set(self.status_path, value)) def _do_update_leader(self) -> bool: try: return self.retry(self._client.write, self.leader_path, self._name, prevValue=self._name, ttl=self._ttl) is not None except etcd.EtcdKeyNotFound: return self._do_attempt_to_acquire_leader() @catch_etcd_errors def _write_failsafe(self, value: str) -> bool: return bool(self._client.set(self.failsafe_path, value)) @catch_return_false_exception def _update_leader(self, leader: Leader) -> bool: return bool(self._run_and_handle_exceptions(self._do_update_leader, retry=None)) @catch_etcd_errors def initialize(self, create_new: bool = True, sysid: str = "") -> bool: return bool(self.retry(self._client.write, self.initialize_path, sysid, prevExist=(not create_new))) @catch_etcd_errors def _delete_leader(self, leader: Leader) -> bool: return bool(self._client.delete(self.leader_path, prevValue=self._name)) @catch_etcd_errors def cancel_initialization(self) -> bool: return bool(self.retry(self._client.delete, self.initialize_path)) @catch_etcd_errors def delete_cluster(self) -> bool: return bool(self.retry(self._client.delete, self.client_path(''), recursive=True)) @catch_etcd_errors def set_history_value(self, value: str) -> bool: return bool(self._client.write(self.history_path, value)) @catch_etcd_errors def set_sync_state_value(self, value: str, version: Optional[int] = None) -> Union[int, bool]: return self.retry(self._client.write, self.sync_path, value, prevIndex=version or 0).modifiedIndex @catch_etcd_errors def delete_sync_state(self, version: Optional[int] = None) -> bool: return bool(self.retry(self._client.delete, self.sync_path, prevIndex=version or 0)) def watch(self, leader_version: 
Optional[int], timeout: float) -> bool: if self.__do_not_watch: self.__do_not_watch = False return True if leader_version: end_time = time.time() + timeout while timeout >= 1: # when timeout is too small urllib3 doesn't have enough time to connect try: result = self._client.watch(self.leader_path, index=leader_version, timeout=timeout + 0.5) self._has_failed = False if result.action == 'compareAndSwap': time.sleep(0.01) # Synchronous work of all cluster members with etcd is less expensive # than reestablishing http connection every time from every replica. return True except etcd.EtcdWatchTimedOut: self._has_failed = False return False except (etcd.EtcdEventIndexCleared, etcd.EtcdWatcherCleared): # Watch failed self._has_failed = False return True # leave the loop, because watch with the same parameters will fail anyway except etcd.EtcdException as e: self._handle_exception(e, 'watch', True) timeout = end_time - time.time() try: return super(Etcd, self).watch(None, timeout) finally: self.event.clear() etcd.EtcdError.error_exceptions[300] = EtcdRaftInternal patroni-3.2.2/patroni/dcs/etcd3.py000066400000000000000000001170011455170150700170000ustar00rootroot00000000000000from __future__ import absolute_import import base64 import etcd import json import logging import os import socket import sys import time import urllib3 from collections import defaultdict from enum import IntEnum from urllib3.exceptions import ReadTimeoutError, ProtocolError from threading import Condition, Lock, Thread from typing import Any, Callable, Collection, Dict, Iterator, List, Optional, Tuple, Type, TYPE_CHECKING, Union from . 
import ClusterConfig, Cluster, Failover, Leader, Member, Status, SyncState, \ TimelineHistory, catch_return_false_exception, citus_group_re from .etcd import AbstractEtcdClientWithFailover, AbstractEtcd, catch_etcd_errors, DnsCachingResolver, Retry from ..exceptions import DCSError, PatroniException from ..utils import deep_compare, enable_keepalive, iter_response_objects, RetryFailedError, USER_AGENT logger = logging.getLogger(__name__) class Etcd3Error(DCSError): pass class UnsupportedEtcdVersion(PatroniException): pass # google.golang.org/grpc/codes class GRPCCode(IntEnum): OK = 0 Canceled = 1 Unknown = 2 InvalidArgument = 3 DeadlineExceeded = 4 NotFound = 5 AlreadyExists = 6 PermissionDenied = 7 ResourceExhausted = 8 FailedPrecondition = 9 Aborted = 10 OutOfRange = 11 Unimplemented = 12 Internal = 13 Unavailable = 14 DataLoss = 15 Unauthenticated = 16 GRPCcodeToText: Dict[int, str] = {v: k for k, v in GRPCCode.__dict__['_member_map_'].items()} class Etcd3Exception(etcd.EtcdException): pass class Etcd3ClientError(Etcd3Exception): def __init__(self, code: Optional[int] = None, error: Optional[str] = None, status: Optional[int] = None) -> None: if not hasattr(self, 'error'): self.error = error and error.strip() self.codeText = GRPCcodeToText.get(code) if code is not None else None self.status = status def __repr__(self) -> str: return "<{0} error: '{1}', code: {2}>"\ .format(self.__class__.__name__, getattr(self, 'error', None), getattr(self, 'code', None)) __str__ = __repr__ def as_dict(self) -> Dict[str, Any]: return {'error': getattr(self, 'error', None), 'code': getattr(self, 'code', None), 'codeText': self.codeText, 'status': self.status} @classmethod def get_subclasses(cls) -> Iterator[Type['Etcd3ClientError']]: for subclass in cls.__subclasses__(): for subsubclass in subclass.get_subclasses(): yield subsubclass yield subclass class Unknown(Etcd3ClientError): code = GRPCCode.Unknown class InvalidArgument(Etcd3ClientError): code = GRPCCode.InvalidArgument 
class DeadlineExceeded(Etcd3ClientError): code = GRPCCode.DeadlineExceeded error = "context deadline exceeded" class NotFound(Etcd3ClientError): code = GRPCCode.NotFound class FailedPrecondition(Etcd3ClientError): code = GRPCCode.FailedPrecondition class Unavailable(Etcd3ClientError): code = GRPCCode.Unavailable # https://github.com/etcd-io/etcd/commits/main/api/v3rpc/rpctypes/error.go class LeaseNotFound(NotFound): error = "etcdserver: requested lease not found" class UserEmpty(InvalidArgument): error = "etcdserver: user name is empty" class AuthFailed(InvalidArgument): error = "etcdserver: authentication failed, invalid user ID or password" class AuthOldRevision(InvalidArgument): error = "etcdserver: revision of auth store is old" class PermissionDenied(Etcd3ClientError): code = GRPCCode.PermissionDenied error = "etcdserver: permission denied" class AuthNotEnabled(FailedPrecondition): error = "etcdserver: authentication is not enabled" class InvalidAuthToken(Etcd3ClientError): code = GRPCCode.Unauthenticated error = "etcdserver: invalid auth token" errStringToClientError = {getattr(s, 'error'): s for s in Etcd3ClientError.get_subclasses() if hasattr(s, 'error')} errCodeToClientError = {getattr(s, 'code'): s for s in Etcd3ClientError.__subclasses__()} def _raise_for_data(data: Union[bytes, str, Dict[str, Union[Any, Dict[str, Any]]]], status_code: Optional[int] = None) -> Etcd3ClientError: try: if TYPE_CHECKING: # pragma: no cover assert isinstance(data, dict) data_error: Optional[Dict[str, Any]] = data.get('error') or data.get('Error') if isinstance(data_error, dict): # streaming response status_code = data_error.get('http_code') code: Optional[int] = data_error['grpc_code'] error: str = data_error['message'] else: data_code = data.get('code') or data.get('Code') if TYPE_CHECKING: # pragma: no cover assert not isinstance(data_code, dict) code = data_code error = str(data_error) except Exception: error = str(data) code = GRPCCode.Unknown err = 
errStringToClientError.get(error) or errCodeToClientError.get(code) or Unknown return err(code, error, status_code) def to_bytes(v: Union[str, bytes]) -> bytes: return v if isinstance(v, bytes) else v.encode('utf-8') def prefix_range_end(v: str) -> bytes: ret = bytearray(to_bytes(v)) for i in range(len(ret) - 1, -1, -1): if ret[i] < 0xff: ret[i] += 1 break return bytes(ret) def base64_encode(v: Union[str, bytes]) -> str: return base64.b64encode(to_bytes(v)).decode('utf-8') def base64_decode(v: str) -> str: return base64.b64decode(v).decode('utf-8') def build_range_request(key: str, range_end: Union[bytes, str, None] = None) -> Dict[str, Any]: fields = {'key': base64_encode(key)} if range_end: fields['range_end'] = base64_encode(range_end) return fields class ReAuthenticateMode(IntEnum): NOT_REQUIRED = 0 REQUIRED = 1 WITHOUT_WATCHER_RESTART = 2 def _handle_auth_errors(func: Callable[..., Any]) -> Any: def wrapper(self: 'Etcd3Client', *args: Any, **kwargs: Any) -> Any: return self.handle_auth_errors(func, *args, **kwargs) return wrapper class Etcd3Client(AbstractEtcdClientWithFailover): ERROR_CLS = Etcd3Error def __init__(self, config: Dict[str, Any], dns_resolver: DnsCachingResolver, cache_ttl: int = 300) -> None: self._reauthenticate_reason = ReAuthenticateMode.NOT_REQUIRED self._token = None self._cluster_version: Tuple[int, ...] 
= tuple() super(Etcd3Client, self).__init__({**config, 'version_prefix': '/v3beta'}, dns_resolver, cache_ttl) try: self.authenticate() except AuthFailed as e: logger.fatal('Etcd3 authentication failed: %r', e) sys.exit(1) def _get_headers(self) -> Dict[str, str]: headers = urllib3.make_headers(user_agent=USER_AGENT) if self._token and self._cluster_version >= (3, 3, 0): headers['authorization'] = self._token return headers def _prepare_request(self, kwargs: Dict[str, Any], params: Optional[Dict[str, Any]] = None, method: Optional[str] = None) -> Callable[..., urllib3.response.HTTPResponse]: if params is not None: kwargs['body'] = json.dumps(params) kwargs['headers']['Content-Type'] = 'application/json' return self.http.urlopen def _handle_server_response(self, response: urllib3.response.HTTPResponse) -> Dict[str, Any]: data = response.data try: data = data.decode('utf-8') ret: Dict[str, Any] = json.loads(data) if response.status < 400: return ret except (TypeError, ValueError, UnicodeError) as e: if response.status < 400: raise etcd.EtcdException('Server response was not valid JSON: %r' % e) ret = {} raise _raise_for_data(ret or data, response.status) def _ensure_version_prefix(self, base_uri: str, **kwargs: Any) -> None: if self.version_prefix != '/v3': response = self.http.urlopen(self._MGET, base_uri + '/version', **kwargs) response = self._handle_server_response(response) server_version_str = response['etcdserver'] server_version = tuple(int(x) for x in server_version_str.split('.')) cluster_version_str = response['etcdcluster'] self._cluster_version = tuple(int(x) for x in cluster_version_str.split('.')) if self._cluster_version < (3, 0) or server_version < (3, 0, 4): raise UnsupportedEtcdVersion('Detected Etcd version {0} is lower than 3.0.4'.format(server_version_str)) if self._cluster_version < (3, 3): if self.version_prefix != '/v3alpha': if self._cluster_version < (3, 1): logger.warning('Detected Etcd version %s is lower than 3.1.0, watches are not 
supported', cluster_version_str) if self.username and self.password: logger.warning('Detected Etcd version %s is lower than 3.3.0, authentication is not supported', cluster_version_str) self.version_prefix = '/v3alpha' elif self._cluster_version < (3, 4): self.version_prefix = '/v3beta' else: self.version_prefix = '/v3' def _prepare_get_members(self, etcd_nodes: int) -> Dict[str, Any]: kwargs = self._prepare_common_parameters(etcd_nodes) self._prepare_request(kwargs, {}) return kwargs def _get_members(self, base_uri: str, **kwargs: Any) -> List[str]: self._ensure_version_prefix(base_uri, **kwargs) resp = self.http.urlopen(self._MPOST, base_uri + self.version_prefix + '/cluster/member/list', **kwargs) members = self._handle_server_response(resp)['members'] return [url for member in members for url in member.get('clientURLs', [])] def call_rpc(self, method: str, fields: Dict[str, Any], retry: Optional[Retry] = None) -> Dict[str, Any]: fields['retry'] = retry return self.api_execute(self.version_prefix + method, self._MPOST, fields) def authenticate(self, *, restart_watcher: bool = True, retry: Optional[Retry] = None) -> bool: if self._use_proxies and not self._cluster_version: kwargs = self._prepare_common_parameters(1) self._ensure_version_prefix(self._base_uri, **kwargs) if not (self._cluster_version >= (3, 3) and self.username and self.password): return False logger.info('Trying to authenticate on Etcd...') old_token, self._token = self._token, None try: response = self.call_rpc('/auth/authenticate', {'name': self.username, 'password': self.password}, retry) except AuthNotEnabled: logger.info('Etcd authentication is not enabled') self._token = None except Exception: self._token = old_token raise else: self._token = response.get('token') return old_token != self._token def handle_auth_errors(self: 'Etcd3Client', func: Callable[..., Any], *args: Any, retry: Optional[Retry] = None, **kwargs: Any) -> Any: exc = None while True: if self._reauthenticate_reason: if 
self.username and self.password: self.authenticate( restart_watcher=self._reauthenticate_reason != ReAuthenticateMode.WITHOUT_WATCHER_RESTART, retry=retry) self._reauthenticate_reason = ReAuthenticateMode.NOT_REQUIRED if retry: retry.ensure_deadline(0) else: msg = 'Username or password not set, authentication is not possible' logger.fatal(msg) raise exc or Etcd3Exception(msg) try: return func(self, *args, retry=retry, **kwargs) except (UserEmpty, PermissionDenied) as e: # no token provided # PermissionDenied is raised on 3.0 and 3.1 if self._cluster_version < (3, 3) and (not isinstance(e, PermissionDenied) or self._cluster_version < (3, 2)): raise UnsupportedEtcdVersion('Authentication is required by Etcd cluster but not ' 'supported on version lower than 3.3.0. Cluster version: ' '{0}'.format('.'.join(map(str, self._cluster_version)))) exc = e except InvalidAuthToken as e: logger.error('Invalid auth token: %s', self._token) exc = e except AuthOldRevision as e: logger.error('Auth token is for old revision of auth store') exc = e self._reauthenticate_reason = ReAuthenticateMode.WITHOUT_WATCHER_RESTART \ if isinstance(exc, AuthOldRevision) else ReAuthenticateMode.REQUIRED if not retry: raise exc retry.ensure_deadline(0.5, exc) @_handle_auth_errors def range(self, key: str, range_end: Union[bytes, str, None] = None, serializable: bool = True, *, retry: Optional[Retry] = None) -> Dict[str, Any]: params = build_range_request(key, range_end) params['serializable'] = serializable # For better performance. 
We can tolerate stale reads return self.call_rpc('/kv/range', params, retry) def prefix(self, key: str, serializable: bool = True, *, retry: Optional[Retry] = None) -> Dict[str, Any]: return self.range(key, prefix_range_end(key), serializable, retry=retry) @_handle_auth_errors def lease_grant(self, ttl: int, *, retry: Optional[Retry] = None) -> str: return self.call_rpc('/lease/grant', {'TTL': ttl}, retry)['ID'] def lease_keepalive(self, ID: str, *, retry: Optional[Retry] = None) -> Optional[str]: return self.call_rpc('/lease/keepalive', {'ID': ID}, retry).get('result', {}).get('TTL') @_handle_auth_errors def txn(self, compare: Dict[str, Any], success: Dict[str, Any], failure: Optional[Dict[str, Any]] = None, *, retry: Optional[Retry] = None) -> Dict[str, Any]: fields = {'compare': [compare], 'success': [success]} if failure: fields['failure'] = [failure] ret = self.call_rpc('/kv/txn', fields, retry) return ret if failure or ret.get('succeeded') else {} @_handle_auth_errors def put(self, key: str, value: str, lease: Optional[str] = None, create_revision: Optional[str] = None, mod_revision: Optional[str] = None, *, retry: Optional[Retry] = None) -> Dict[str, Any]: fields = {'key': base64_encode(key), 'value': base64_encode(value)} if lease: fields['lease'] = lease if create_revision is not None: compare = {'target': 'CREATE', 'create_revision': create_revision} elif mod_revision is not None: compare = {'target': 'MOD', 'mod_revision': mod_revision} else: return self.call_rpc('/kv/put', fields, retry) compare['key'] = fields['key'] return self.txn(compare, {'request_put': fields}, retry=retry) @_handle_auth_errors def deleterange(self, key: str, range_end: Union[bytes, str, None] = None, mod_revision: Optional[str] = None, *, retry: Optional[Retry] = None) -> Dict[str, Any]: fields = build_range_request(key, range_end) if mod_revision is None: return self.call_rpc('/kv/deleterange', fields, retry) compare = {'target': 'MOD', 'mod_revision': mod_revision, 'key': 
fields['key']} return self.txn(compare, {'request_delete_range': fields}, retry=retry) def deleteprefix(self, key: str, *, retry: Optional[Retry] = None) -> Dict[str, Any]: return self.deleterange(key, prefix_range_end(key), retry=retry) def watchrange(self, key: str, range_end: Union[bytes, str, None] = None, start_revision: Optional[str] = None, filters: Optional[List[Dict[str, Any]]] = None, read_timeout: Optional[float] = None) -> urllib3.response.HTTPResponse: """returns: response object""" params = build_range_request(key, range_end) if start_revision is not None: params['start_revision'] = start_revision params['filters'] = filters or [] kwargs = self._prepare_common_parameters(1, self.read_timeout) request_executor = self._prepare_request(kwargs, {'create_request': params}) kwargs.update(timeout=urllib3.Timeout(connect=kwargs['timeout'], read=read_timeout), retries=0) return request_executor(self._MPOST, self._base_uri + self.version_prefix + '/watch', **kwargs) def watchprefix(self, key: str, start_revision: Optional[str] = None, filters: Optional[List[Dict[str, Any]]] = None, read_timeout: Optional[float] = None) -> urllib3.response.HTTPResponse: return self.watchrange(key, prefix_range_end(key), start_revision, filters, read_timeout) class KVCache(Thread): def __init__(self, dcs: 'Etcd3', client: 'PatroniEtcd3Client') -> None: super(KVCache, self).__init__() self.daemon = True self._dcs = dcs self._client = client self.condition = Condition() self._config_key = base64_encode(dcs.config_path) self._leader_key = base64_encode(dcs.leader_path) self._optime_key = base64_encode(dcs.leader_optime_path) self._status_key = base64_encode(dcs.status_path) self._name = base64_encode(getattr(dcs, '_name')) # pyright self._is_ready = False self._response = None self._response_lock = Lock() self._object_cache = {} self._object_cache_lock = Lock() self.start() def set(self, value: Dict[str, Any], overwrite: bool = False) -> Tuple[bool, Optional[Dict[str, Any]]]: with 
self._object_cache_lock: name = value['key'] old_value = self._object_cache.get(name) ret = not old_value or int(old_value['mod_revision']) < int(value['mod_revision']) if ret or overwrite and old_value and old_value['mod_revision'] == value['mod_revision']: self._object_cache[name] = value return ret, old_value def delete(self, name: str, mod_revision: str) -> Tuple[bool, Optional[Dict[str, Any]]]: with self._object_cache_lock: old_value = self._object_cache.get(name) ret = old_value and int(old_value['mod_revision']) < int(mod_revision) if ret: del self._object_cache[name] return bool(not old_value or ret), old_value def copy(self) -> List[Dict[str, Any]]: with self._object_cache_lock: return [v.copy() for v in self._object_cache.values()] def get(self, name: str) -> Optional[Dict[str, Any]]: with self._object_cache_lock: return self._object_cache.get(name) def _process_event(self, event: Dict[str, Any]) -> None: kv = event['kv'] key = kv['key'] if event.get('type') == 'DELETE': success, old_value = self.delete(key, kv['mod_revision']) else: success, old_value = self.set(kv, True) if success: old_value = old_value and old_value.get('value') new_value = kv.get('value') value_changed = old_value != new_value and \ (key == self._leader_key or key in (self._optime_key, self._status_key) and new_value is not None or key == self._config_key and old_value is not None and new_value is not None) if value_changed: logger.debug('%s changed from %s to %s', key, old_value, new_value) # We also want to wake up HA loop on replicas if leader optime (or status key) was updated if value_changed and (key not in (self._optime_key, self._status_key) or (self.get(self._leader_key) or {}).get('value') != self._name): self._dcs.event.set() def _process_message(self, message: Dict[str, Any]) -> None: logger.debug('Received message: %s', message) if 'error' in message: raise _raise_for_data(message) events: List[Dict[str, Any]] = message.get('result', {}).get('events', []) for event in 
events: self._process_event(event) @staticmethod def _finish_response(response: urllib3.response.HTTPResponse) -> None: try: response.close() finally: response.release_conn() def _do_watch(self, revision: str) -> None: with self._response_lock: self._response = None # We do most of requests with timeouts. The only exception /watch requests to Etcd v3. # In order to interrupt the /watch request we do socket.shutdown() from the main thread, # which doesn't work on Windows. Therefore we want to use the last resort, `read_timeout`. # Setting it to TTL will help to partially mitigate the problem. # Setting it to lower value is not nice because for idling clusters it will increase # the numbers of interrupts and reconnects. read_timeout = self._dcs.ttl if os.name == 'nt' else None response = self._client.watchprefix(self._dcs.cluster_prefix, revision, read_timeout=read_timeout) with self._response_lock: if self._response is None: self._response = response if not self._response: return self._finish_response(response) for message in iter_response_objects(response): self._process_message(message) def _build_cache(self) -> None: result = self._dcs.retry(self._client.prefix, self._dcs.cluster_prefix) with self._object_cache_lock: self._object_cache = {node['key']: node for node in result.get('kvs', [])} with self.condition: self._is_ready = True self.condition.notify() try: self._do_watch(result['header']['revision']) except Exception as e: # Following exceptions are expected on Windows because the /watch request is done with `read_timeout` if not (os.name == 'nt' and isinstance(e, (ReadTimeoutError, ProtocolError))): logger.error('watchprefix failed: %r', e) finally: with self.condition: self._is_ready = False with self._response_lock: response, self._response = self._response, None if isinstance(response, urllib3.response.HTTPResponse): self._finish_response(response) def run(self) -> None: while True: try: self._build_cache() except Exception as e: 
logger.error('KVCache.run %r', e) time.sleep(1) def kill_stream(self) -> None: sock = None with self._response_lock: if isinstance(self._response, urllib3.response.HTTPResponse): try: sock = self._response.connection.sock if self._response.connection else None except Exception: sock = None else: self._response = False if sock: try: sock.shutdown(socket.SHUT_RDWR) sock.close() except Exception as e: logger.debug('Error on socket.shutdown: %r', e) def is_ready(self) -> bool: """Must be called only when holding the lock on `condition`""" return self._is_ready class PatroniEtcd3Client(Etcd3Client): def __init__(self, *args: Any, **kwargs: Any) -> None: self._kv_cache = None super(PatroniEtcd3Client, self).__init__(*args, **kwargs) def configure(self, etcd3: 'Etcd3') -> None: self._etcd3 = etcd3 def start_watcher(self) -> None: if self._cluster_version >= (3, 1): self._kv_cache = KVCache(self._etcd3, self) def _restart_watcher(self) -> None: if self._kv_cache: self._kv_cache.kill_stream() def set_base_uri(self, value: str) -> None: super(PatroniEtcd3Client, self).set_base_uri(value) self._restart_watcher() def authenticate(self, *, restart_watcher: bool = True, retry: Optional[Retry] = None) -> bool: ret = super(PatroniEtcd3Client, self).authenticate(restart_watcher=restart_watcher, retry=retry) if ret and restart_watcher: self._restart_watcher() return ret def _wait_cache(self, timeout: float) -> None: stop_time = time.time() + timeout while self._kv_cache and not self._kv_cache.is_ready(): timeout = stop_time - time.time() if timeout <= 0: raise RetryFailedError('Exceeded retry deadline') self._kv_cache.condition.wait(timeout) def get_cluster(self, path: str) -> List[Dict[str, Any]]: if self._kv_cache and path.startswith(self._etcd3.cluster_prefix): with self._kv_cache.condition: self._wait_cache(self.read_timeout) ret = self._kv_cache.copy() else: serializable = not getattr(self._etcd3, '_ctl') # use linearizable for patronictl ret = self._etcd3.retry(self.prefix, 
path, serializable).get('kvs', []) for node in ret: node.update({'key': base64_decode(node['key']), 'value': base64_decode(node.get('value', '')), 'lease': node.get('lease')}) return ret def call_rpc(self, method: str, fields: Dict[str, Any], retry: Optional[Retry] = None) -> Dict[str, Any]: ret = super(PatroniEtcd3Client, self).call_rpc(method, fields, retry) if self._kv_cache: value = delete = None # For the 'failure' case we only support a second (nested) transaction that attempts to # update/delete the same keys. Anything more complex than that we don't need and therefore it doesn't # make sense to write a universal response analyzer and we can just check expected JSON path. if method == '/kv/txn'\ and (ret.get('succeeded') or 'failure' in fields and 'request_txn' in fields['failure'][0] and ret.get('responses', [{'response_txn': {'succeeded': False}}])[0] .get('response_txn', {}).get('succeeded')): on_success = fields['success'][0] value = on_success.get('request_put') delete = on_success.get('request_delete_range') elif method == '/kv/put' and ret: value = fields elif method == '/kv/deleterange' and ret: delete = fields if value: value['mod_revision'] = ret['header']['revision'] self._kv_cache.set(value) elif delete and 'range_end' not in delete: self._kv_cache.delete(delete['key'], ret['header']['revision']) return ret def txn(self, compare: Dict[str, Any], success: Dict[str, Any], failure: Optional[Dict[str, Any]] = None, *, retry: Optional[Retry] = None) -> Dict[str, Any]: ret = super(PatroniEtcd3Client, self).txn(compare, success, failure, retry=retry) # Here we abuse the fact that the `failure` is only set in the call from update_leader(). # In all other cases the txn() call failure may be an indicator of a stale cache, # and therefore we want to restart watcher. 
if not failure and not ret: self._restart_watcher() return ret class Etcd3(AbstractEtcd): def __init__(self, config: Dict[str, Any]) -> None: super(Etcd3, self).__init__(config, PatroniEtcd3Client, (DeadlineExceeded, Unavailable, FailedPrecondition)) self.__do_not_watch = False self._lease = None self._last_lease_refresh = 0 self._client.configure(self) if not self._ctl: self._client.start_watcher() self.create_lease() @property def _client(self) -> PatroniEtcd3Client: if TYPE_CHECKING: # pragma: no cover assert isinstance(self._abstract_client, PatroniEtcd3Client) return self._abstract_client def set_socket_options(self, sock: socket.socket, socket_options: Optional[Collection[Tuple[int, int, int]]]) -> None: if TYPE_CHECKING: # pragma: no cover assert self._retry.deadline is not None enable_keepalive(sock, self.ttl, int(self.loop_wait + self._retry.deadline)) def set_ttl(self, ttl: int) -> Optional[bool]: self.__do_not_watch = super(Etcd3, self).set_ttl(ttl) if self.__do_not_watch: self._lease = None return None def _do_refresh_lease(self, force: bool = False, retry: Optional[Retry] = None) -> bool: if not force and self._lease and self._last_lease_refresh + self._loop_wait > time.time(): return False if self._lease and not self._client.lease_keepalive(self._lease, retry=retry): self._lease = None ret = not self._lease if ret: self._lease = self._client.lease_grant(self._ttl, retry=retry) self._last_lease_refresh = time.time() return ret def refresh_lease(self) -> bool: try: return self.retry(self._do_refresh_lease) except (Etcd3ClientError, RetryFailedError): logger.exception('refresh_lease') raise Etcd3Error('Failed to keepalive/grant lease') def create_lease(self) -> None: while not self._lease: try: self.refresh_lease() except Etcd3Error: logger.info('waiting on etcd') time.sleep(5) @property def cluster_prefix(self) -> str: return self._base_path + '/' if self.is_citus_coordinator() else self.client_path('') @staticmethod def member(node: Dict[str, str]) -> 
Member: return Member.from_node(node['mod_revision'], os.path.basename(node['key']), node['lease'], node['value']) def _cluster_from_nodes(self, nodes: Dict[str, Any]) -> Cluster: # get initialize flag initialize = nodes.get(self._INITIALIZE) initialize = initialize and initialize['value'] # get global dynamic configuration config = nodes.get(self._CONFIG) config = config and ClusterConfig.from_node(config['mod_revision'], config['value']) # get timeline history history = nodes.get(self._HISTORY) history = history and TimelineHistory.from_node(history['mod_revision'], history['value']) # get last know leader lsn and slots status = nodes.get(self._STATUS) or nodes.get(self._LEADER_OPTIME) status = Status.from_node(status and status['value']) # get list of members members = [self.member(n) for k, n in nodes.items() if k.startswith(self._MEMBERS) and k.count('/') == 1] # get leader leader = nodes.get(self._LEADER) if not self._ctl and leader and leader['value'] == self._name and self._lease != leader.get('lease'): logger.warning('I am the leader but not owner of the lease') if leader: member = Member(-1, leader['value'], None, {}) member = ([m for m in members if m.name == leader['value']] or [member])[0] leader = Leader(leader['mod_revision'], leader['lease'], member) # failover key failover = nodes.get(self._FAILOVER) if failover: failover = Failover.from_node(failover['mod_revision'], failover['value']) # get synchronization state sync = nodes.get(self._SYNC) sync = SyncState.from_node(sync and sync['mod_revision'], sync and sync['value']) # get failsafe topology failsafe = nodes.get(self._FAILSAFE) try: failsafe = json.loads(failsafe['value']) if failsafe else None except Exception: failsafe = None return Cluster(initialize, config, leader, status, members, failover, sync, history, failsafe) def _cluster_loader(self, path: str) -> Cluster: nodes = {node['key'][len(path):]: node for node in self._client.get_cluster(path) if node['key'].startswith(path)} return 
self._cluster_from_nodes(nodes) def _citus_cluster_loader(self, path: str) -> Dict[int, Cluster]: clusters: Dict[int, Dict[str, Dict[str, Any]]] = defaultdict(dict) path = self._base_path + '/' for node in self._client.get_cluster(path): key = node['key'][len(path):].split('/', 1) if len(key) == 2 and citus_group_re.match(key[0]): clusters[int(key[0])][key[1]] = node return {group: self._cluster_from_nodes(nodes) for group, nodes in clusters.items()} def _load_cluster( self, path: str, loader: Callable[[str], Union[Cluster, Dict[int, Cluster]]] ) -> Union[Cluster, Dict[int, Cluster]]: cluster = None try: cluster = loader(path) except UnsupportedEtcdVersion: raise except Exception as e: self._handle_exception(e, 'get_cluster', raise_ex=Etcd3Error('Etcd is not responding properly')) self._has_failed = False if TYPE_CHECKING: # pragma: no cover assert cluster is not None return cluster @catch_etcd_errors def touch_member(self, data: Dict[str, Any]) -> bool: try: self.refresh_lease() except Etcd3Error: return False cluster = self.cluster member = cluster and cluster.get_member(self._name, fallback_to_leader=False) if member and member.session == self._lease and deep_compare(data, member.data): return True value = json.dumps(data, separators=(',', ':')) try: return bool(self._client.put(self.member_path, value, self._lease)) except LeaseNotFound: self._lease = None logger.error('Our lease disappeared from Etcd, can not "touch_member"') return False @catch_etcd_errors def take_leader(self) -> bool: return self.retry(self._client.put, self.leader_path, self._name, self._lease) def _do_attempt_to_acquire_leader(self, retry: Retry) -> bool: def _retry(*args: Any, **kwargs: Any) -> Any: kwargs['retry'] = retry return retry(*args, **kwargs) try: return _retry(self._client.put, self.leader_path, self._name, self._lease, create_revision='0') except LeaseNotFound: logger.error('Our lease disappeared from Etcd. 
Will try to get a new one and retry attempt') self._lease = None retry.ensure_deadline(0) _retry(self._do_refresh_lease) retry.ensure_deadline(1, Etcd3Error('_do_attempt_to_acquire_leader timeout')) return _retry(self._client.put, self.leader_path, self._name, self._lease, create_revision='0') @catch_return_false_exception def attempt_to_acquire_leader(self) -> bool: retry = self._retry.copy() def _retry(*args: Any, **kwargs: Any) -> Any: kwargs['retry'] = retry return retry(*args, **kwargs) self._run_and_handle_exceptions(self._do_refresh_lease, retry=_retry) retry.ensure_deadline(1, Etcd3Error('attempt_to_acquire_leader timeout')) ret = self._run_and_handle_exceptions(self._do_attempt_to_acquire_leader, retry, retry=None) if not ret: logger.info('Could not take out TTL lock') return ret @catch_etcd_errors def set_failover_value(self, value: str, version: Optional[str] = None) -> bool: return bool(self._client.put(self.failover_path, value, mod_revision=version)) @catch_etcd_errors def set_config_value(self, value: str, version: Optional[str] = None) -> bool: return bool(self._client.put(self.config_path, value, mod_revision=version)) @catch_etcd_errors def _write_leader_optime(self, last_lsn: str) -> bool: return bool(self._client.put(self.leader_optime_path, last_lsn)) @catch_etcd_errors def _write_status(self, value: str) -> bool: return bool(self._client.put(self.status_path, value)) @catch_etcd_errors def _write_failsafe(self, value: str) -> bool: return bool(self._client.put(self.failsafe_path, value)) @catch_return_false_exception def _update_leader(self, leader: Leader) -> bool: retry = self._retry.copy() def _retry(*args: Any, **kwargs: Any) -> Any: kwargs['retry'] = retry return retry(*args, **kwargs) self._run_and_handle_exceptions(self._do_refresh_lease, True, retry=_retry) if self._lease and leader.session != self._lease: retry.ensure_deadline(1, Etcd3Error('update_leader timeout')) fields = {'key': base64_encode(self.leader_path), 'value': 
base64_encode(self._name), 'lease': self._lease} # First we try to update lease on existing leader key "hoping" that we still owning it compare1 = {'key': fields['key'], 'target': 'VALUE', 'value': fields['value']} request_put = {'request_put': fields} # If the first comparison failed we will try to create the new leader key in a transaction compare2 = {'key': fields['key'], 'target': 'CREATE', 'create_revision': '0'} request_txn = {'request_txn': {'compare': [compare2], 'success': [request_put]}} ret = self._run_and_handle_exceptions(self._client.txn, compare1, request_put, request_txn, retry=_retry) return ret.get('succeeded', False)\ or ret.get('responses', [{}])[0].get('response_txn', {}).get('succeeded', False) return bool(self._lease) @catch_etcd_errors def initialize(self, create_new: bool = True, sysid: str = ""): return self.retry(self._client.put, self.initialize_path, sysid, create_revision='0' if create_new else None) @catch_etcd_errors def _delete_leader(self, leader: Leader) -> bool: fields = build_range_request(self.leader_path) compare = {'key': fields['key'], 'target': 'VALUE', 'value': base64_encode(self._name)} return bool(self._client.txn(compare, {'request_delete_range': fields})) @catch_etcd_errors def cancel_initialization(self) -> bool: return self.retry(self._client.deleterange, self.initialize_path) @catch_etcd_errors def delete_cluster(self) -> bool: return self.retry(self._client.deleteprefix, self.client_path('')) @catch_etcd_errors def set_history_value(self, value: str) -> bool: return bool(self._client.put(self.history_path, value)) @catch_etcd_errors def set_sync_state_value(self, value: str, version: Optional[str] = None) -> Union[str, bool]: return self.retry(self._client.put, self.sync_path, value, mod_revision=version)\ .get('header', {}).get('revision', False) @catch_etcd_errors def delete_sync_state(self, version: Optional[str] = None) -> bool: return self.retry(self._client.deleterange, self.sync_path, mod_revision=version) 
def watch(self, leader_version: Optional[str], timeout: float) -> bool: if self.__do_not_watch: self.__do_not_watch = False return True # We want to give a bit more time to non-leader nodes to synchronize HA loops if leader_version: timeout += 0.5 try: return super(Etcd3, self).watch(None, timeout) finally: self.event.clear() patroni-3.2.2/patroni/dcs/exhibitor.py000066400000000000000000000056461455170150700200060ustar00rootroot00000000000000import json import logging import random import time from typing import Any, Callable, Dict, List, Union from . import Cluster from .zookeeper import ZooKeeper from ..request import get as requests_get from ..utils import uri logger = logging.getLogger(__name__) class ExhibitorEnsembleProvider(object): TIMEOUT = 3.1 def __init__(self, hosts: List[str], port: int, uri_path: str = '/exhibitor/v1/cluster/list', poll_interval: int = 300) -> None: self._exhibitor_port = port self._uri_path = uri_path self._poll_interval = poll_interval self._exhibitors: List[str] = hosts self._boot_exhibitors = hosts self._zookeeper_hosts = '' self._next_poll = None while not self.poll(): logger.info('waiting on exhibitor') time.sleep(5) def poll(self) -> bool: if self._next_poll and self._next_poll > time.time(): return False json = self._query_exhibitors(self._exhibitors) if not json: json = self._query_exhibitors(self._boot_exhibitors) if isinstance(json, dict) and 'servers' in json and 'port' in json: self._next_poll = time.time() + self._poll_interval servers: List[str] = json['servers'] zookeeper_hosts = ','.join([h + ':' + str(json['port']) for h in sorted(servers)]) if self._zookeeper_hosts != zookeeper_hosts: logger.info('ZooKeeper connection string has changed: %s => %s', self._zookeeper_hosts, zookeeper_hosts) self._zookeeper_hosts = zookeeper_hosts self._exhibitors = json['servers'] return True return False def _query_exhibitors(self, exhibitors: List[str]) -> Union[Dict[str, Any], Any]: random.shuffle(exhibitors) for host in exhibitors: 
try: response = requests_get(uri('http', (host, self._exhibitor_port), self._uri_path), timeout=self.TIMEOUT) return json.loads(response.data.decode('utf-8')) except Exception: logging.debug('Request to %s failed', host) return None @property def zookeeper_hosts(self) -> str: return self._zookeeper_hosts class Exhibitor(ZooKeeper): def __init__(self, config: Dict[str, Any]) -> None: interval = config.get('poll_interval', 300) self._ensemble_provider = ExhibitorEnsembleProvider(config['hosts'], config['port'], poll_interval=interval) super(Exhibitor, self).__init__({**config, 'hosts': self._ensemble_provider.zookeeper_hosts}) def _load_cluster( self, path: str, loader: Callable[[str], Union[Cluster, Dict[int, Cluster]]] ) -> Union[Cluster, Dict[int, Cluster]]: if self._ensemble_provider.poll(): self._client.set_hosts(self._ensemble_provider.zookeeper_hosts) return super(Exhibitor, self)._load_cluster(path, loader) patroni-3.2.2/patroni/dcs/kubernetes.py000066400000000000000000002010721455170150700201470ustar00rootroot00000000000000import atexit import base64 import datetime import functools import json import logging import os import random import socket import tempfile import time import urllib3 import yaml from collections import defaultdict from copy import deepcopy from http.client import HTTPException from urllib3.exceptions import HTTPError from threading import Condition, Lock, Thread from typing import Any, Callable, Collection, Dict, List, Optional, Tuple, Type, Union, TYPE_CHECKING from . 
import AbstractDCS, Cluster, ClusterConfig, Failover, Leader, Member, Status, SyncState, \ TimelineHistory, CITUS_COORDINATOR_GROUP_ID, citus_group_re from ..exceptions import DCSError from ..utils import deep_compare, iter_response_objects, keepalive_socket_options, \ Retry, RetryFailedError, tzutc, uri, USER_AGENT if TYPE_CHECKING: # pragma: no cover from ..config import Config logger = logging.getLogger(__name__) KUBE_CONFIG_DEFAULT_LOCATION = os.environ.get('KUBECONFIG', '~/.kube/config') SERVICE_HOST_ENV_NAME = 'KUBERNETES_SERVICE_HOST' SERVICE_PORT_ENV_NAME = 'KUBERNETES_SERVICE_PORT' SERVICE_TOKEN_FILENAME = '/var/run/secrets/kubernetes.io/serviceaccount/token' SERVICE_CERT_FILENAME = '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt' __temp_files: List[str] = [] class KubernetesError(DCSError): pass def _cleanup_temp_files() -> None: global __temp_files for temp_file in __temp_files: try: os.remove(temp_file) except OSError: pass __temp_files = [] def _create_temp_file(content: bytes) -> str: if len(__temp_files) == 0: atexit.register(_cleanup_temp_files) fd, name = tempfile.mkstemp() os.write(fd, content) os.close(fd) __temp_files.append(name) return name # this function does the same mapping of snake_case => camelCase for > 97% of cases as autogenerated swagger code def to_camel_case(value: str) -> str: reserved = {'api', 'apiv3', 'cidr', 'cpu', 'csi', 'id', 'io', 'ip', 'ipc', 'pid', 'tls', 'uri', 'url', 'uuid'} words = value.split('_') return words[0] + ''.join(w.upper() if w in reserved else w.title() for w in words[1:]) class K8sConfig(object): class ConfigException(Exception): pass def __init__(self) -> None: self.pool_config: Dict[str, Any] = {'maxsize': 10, 'num_pools': 10} # urllib3.PoolManager config self._token_expires_at = datetime.datetime.max self._headers: Dict[str, str] = {} self._make_headers() def _set_token(self, token: str) -> None: self._headers['authorization'] = 'Bearer ' + token def _make_headers(self, token: Optional[str] = 
None, **kwargs: Any) -> None: self._headers = urllib3.make_headers(user_agent=USER_AGENT, **kwargs) if token: self._set_token(token) def _read_token_file(self) -> str: if not os.path.isfile(SERVICE_TOKEN_FILENAME): raise self.ConfigException('Service token file does not exists.') with open(SERVICE_TOKEN_FILENAME) as f: token = f.read() if not token: raise self.ConfigException('Token file exists but empty.') self._token_expires_at = datetime.datetime.now() + self._token_refresh_interval return token def load_incluster_config(self, ca_certs: str = SERVICE_CERT_FILENAME, token_refresh_interval: datetime.timedelta = datetime.timedelta(minutes=1)) -> None: if SERVICE_HOST_ENV_NAME not in os.environ or SERVICE_PORT_ENV_NAME not in os.environ: raise self.ConfigException('Service host/port is not set.') if not os.environ[SERVICE_HOST_ENV_NAME] or not os.environ[SERVICE_PORT_ENV_NAME]: raise self.ConfigException('Service host/port is set but empty.') if not os.path.isfile(ca_certs): raise self.ConfigException('Service certificate file does not exists.') with open(ca_certs) as f: if not f.read(): raise self.ConfigException('Cert file exists but empty.') self.pool_config['ca_certs'] = ca_certs self._token_refresh_interval = token_refresh_interval token = self._read_token_file() self._make_headers(token=token) self._server = uri('https', (os.environ[SERVICE_HOST_ENV_NAME], os.environ[SERVICE_PORT_ENV_NAME])) @staticmethod def _get_by_name(config: Dict[str, List[Dict[str, Any]]], section: str, name: str) -> Optional[Dict[str, Any]]: for c in config[section + 's']: if c['name'] == name: return c[section] def _pool_config_from_file_or_data(self, config: Dict[str, str], file_key_name: str, pool_key_name: str) -> None: data_key_name = file_key_name + '-data' if data_key_name in config: self.pool_config[pool_key_name] = _create_temp_file(base64.b64decode(config[data_key_name])) elif file_key_name in config: self.pool_config[pool_key_name] = config[file_key_name] def 
class K8sObject(object):
    """Read-only, attribute-style wrapper around a deserialized K8s API response."""

    def __init__(self, kwargs: Dict[str, Any]) -> None:
        self._dict = {key: self._wrap(key, value) for key, value in kwargs.items()}

    def get(self, name: str, default: Optional[Any] = None) -> Optional[Any]:
        """Return the wrapped value stored under *name*, or *default* when absent."""
        return self._dict.get(name, default)

    def __getattr__(self, name: str) -> Any:
        # Attribute access maps snake_case to the camelCase key used by the API.
        return self.get(to_camel_case(name))

    @classmethod
    def _wrap(cls, parent: Optional[str], value: Any) -> Any:
        """Recursively convert nested dicts/lists into K8sObject instances."""
        if isinstance(value, list):
            items: List[Any] = value
            return [cls._wrap(None, item) for item in items]
        if not isinstance(value, dict):
            return value
        mapping: Dict[str, Any] = value
        # we know that `annotations` and `labels` are dicts and therefore don't want to convert them into K8sObject
        if parent in {'annotations', 'labels'} and all(isinstance(v, str) for v in mapping.values()):
            return mapping
        return cls(mapping)

    def to_dict(self) -> Dict[str, Any]:
        return self._dict

    def __repr__(self) -> str:
        return json.dumps(self, indent=4, default=lambda obj: obj.to_dict())
        def _get_api_servers(self, api_servers_cache: List[str]) -> List[str]:
            """Discover the live K8s API server addresses via the 'kubernetes' endpoint.

            Queries each cached server until one returns the ``default/endpoints/kubernetes``
            object, then collects all https/TCP addresses from its subsets.

            :param api_servers_cache: base URIs to try, in order.
            :returns: shuffled list of ``https://ip:port`` URIs.
            :raises k8s_client.rest.ApiException: re-raised immediately on HTTP 403 (RBAC denies access).
            :raises K8sConnectionFailed: if no server produced a usable endpoint list.
            """
            _, per_node_timeout, per_node_retries = self._calculate_timeouts(len(api_servers_cache))
            headers = self._make_headers({})
            kwargs = {'preload_content': True, 'retries': per_node_retries,
                      'timeout': urllib3.Timeout(connect=max(1.0, per_node_timeout / 2.0), total=per_node_timeout)}
            path = self._API_URL_PREFIX + 'default/endpoints/kubernetes'
            for base_uri in api_servers_cache:
                try:
                    response = self.pool_manager.request('GET', base_uri + path, headers=headers, **kwargs)
                    endpoint = self._handle_server_response(response, True)
                    if TYPE_CHECKING:  # pragma: no cover
                        assert isinstance(endpoint, K8sObject)
                    for subset in endpoint.subsets:
                        for port in subset.ports:
                            if port.name == 'https' and port.protocol == 'TCP':
                                addresses = [uri('https', (a.ip, port.port)) for a in subset.addresses]
                                if addresses:
                                    # Randomize so clients spread load across API servers.
                                    random.shuffle(addresses)
                                    return addresses
                except Exception as e:
                    # 403 means RBAC forbids this lookup -- propagate so the caller can disable bypass.
                    if isinstance(e, k8s_client.rest.ApiException) and e.status == 403:
                        raise
                    self.pool_manager.clear()
                    logger.error('Failed to get "kubernetes" endpoint from %s: %r', base_uri, e)
            raise K8sConnectionFailed('No more K8s API server nodes in the cluster')
No retries for clusters with 2 or more API server nodes. We better rely on switching to a different node.""" per_node_timeout = timeout = float(timeout or self._read_timeout) max_retries = 3 - min(api_servers, 2) per_node_retries = 1 min_timeout = 1.0 while api_servers > 0: per_node_timeout = float(timeout) / api_servers if per_node_timeout >= min_timeout: # for small clusters we will try to do more than one try on every node while per_node_retries < max_retries and per_node_timeout / (per_node_retries + 1) >= min_timeout: per_node_retries += 1 per_node_timeout /= per_node_retries break # if the timeout per one node is to small try to reduce number of nodes api_servers -= 1 max_retries = 1 return api_servers, per_node_timeout, per_node_retries - 1 def _do_http_request(self, retry: Optional[Retry], api_servers_cache: List[str], method: str, path: str, **kwargs: Any) -> urllib3.HTTPResponse: some_request_failed = False for i, base_uri in enumerate(api_servers_cache): if i > 0: logger.info('Retrying on %s', base_uri) try: response = self.pool_manager.request(method, base_uri + path, **kwargs) if some_request_failed: self.set_base_uri(base_uri) self._refresh_api_servers_cache() return response except (HTTPError, HTTPException, socket.error, socket.timeout) as e: self.pool_manager.clear() if not retry: # switch to the next node if request failed and retry is not allowed if i + 1 < len(api_servers_cache): self.set_base_uri(api_servers_cache[i + 1]) raise K8sException('{0} {1} request failed'.format(method, path)) logger.error('Request to server %s failed: %r', base_uri, e) some_request_failed = True raise K8sConnectionFailed('No more API server nodes in the cluster') def request( self, retry: Optional[Retry], method: str, path: str, timeout: Union[int, float, Tuple[Union[int, float], Union[int, float]], urllib3.Timeout, None] = None, **kwargs: Any) -> urllib3.HTTPResponse: if self._update_api_servers_cache: self._load_api_servers_cache() api_servers_cache = 
        def request(
                self, retry: Optional[Retry], method: str, path: str,
                timeout: Union[int, float, Tuple[Union[int, float], Union[int, float]], urllib3.Timeout, None] = None,
                **kwargs: Any) -> urllib3.HTTPResponse:
            """High-level request wrapper with timeout calculation and cache-driven retries.

            When *timeout* is not given, per-node timeout/retries are derived from the
            number of known API servers.  On :class:`K8sConnectionFailed` the server
            cache is reloaded and the request is retried with recalculated timeouts
            until the retry deadline is exhausted.

            :param retry: optional :class:`Retry` carrying the overall deadline.
            :param timeout: explicit timeout -- number (total), 2-tuple (connect, read)
                            or a ready ``urllib3.Timeout``.
            :returns: the raw urllib3 response.
            """
            if self._update_api_servers_cache:
                self._load_api_servers_cache()
            api_servers_cache = self.api_servers_cache
            api_servers = len(api_servers_cache)

            if timeout:
                if isinstance(timeout, (int, float)):
                    timeout = urllib3.Timeout(total=timeout)
                elif isinstance(timeout, tuple) and len(timeout) == 2:
                    timeout = urllib3.Timeout(connect=timeout[0], read=timeout[1])
                retries = 0
            else:
                _, timeout, retries = self._calculate_timeouts(api_servers)
                timeout = urllib3.Timeout(connect=max(1.0, timeout / 2.0), total=timeout)
            kwargs.update(retries=retries, timeout=timeout)

            while True:
                try:
                    return self._do_http_request(retry, api_servers_cache, method, path, **kwargs)
                except K8sConnectionFailed as ex:
                    try:
                        self._load_api_servers_cache()
                        api_servers_cache = self.api_servers_cache
                        api_servers = len(api_servers_cache)
                    except Exception as e:
                        logger.debug('Failed to update list of K8s master nodes: %r', e)

                    if TYPE_CHECKING:  # pragma: no cover
                        assert isinstance(retry, Retry)  # K8sConnectionFailed is raised only if retry is not None!
                    sleeptime = retry.sleeptime
                    remaining_time = (retry.stoptime or time.time()) - sleeptime - time.time()
                    nodes, timeout, retries = self._calculate_timeouts(api_servers, remaining_time)
                    if nodes == 0:
                        # Out of time: force a cache reload on the next call and give up.
                        self._update_api_servers_cache = True
                        raise ex
                    retry.sleep_func(sleeptime)
                    retry.update_delay()
                    # We still have some time left. Partially reduce `api_servers_cache` and retry request
                    kwargs.update(timeout=urllib3.Timeout(connect=max(1.0, timeout / 2.0), total=timeout),
                                  retries=retries)
                    api_servers_cache = api_servers_cache[:nodes]
# --- Continuation of CoreV1Api.__getattr__.wrapper (the `elif action == 'create':`
# --- branch is cut at the chunk boundary above).
                body = args[1]
            elif action == 'delete':  # name, namespace
                body = kwargs.pop('body', None)
            else:
                body = None
            return self._api_client.call_api(method, path, headers, body, **kwargs)
        return wrapper

class _K8sObjectTemplate(K8sObject):
    """The template for objects which we create locally, e.g. k8s_client.V1ObjectMeta & co"""

    def __init__(self, **kwargs: Any) -> None:
        # Store attributes directly, converting snake_case names to camelCase keys.
        self._dict = {to_camel_case(k): v for k, v in kwargs.items()}

def __init__(self) -> None:
    # K8sClient.__init__: cache of dynamically created template classes,
    # guarded by a lock because __getattr__ may be hit from several threads.
    self.__cls_cache: Dict[str, Type['K8sClient._K8sObjectTemplate']] = {}
    self.__cls_lock = Lock()

def __getattr__(self, name: str) -> Type['K8sClient._K8sObjectTemplate']:
    # K8sClient.__getattr__: create (once) and return a template class named
    # after the attribute, e.g. k8s_client.V1ObjectMeta.
    with self.__cls_lock:
        if name not in self.__cls_cache:
            self.__cls_cache[name] = type(name, (self._K8sObjectTemplate,), {})
    return self.__cls_cache[name]

# Module-level singletons used throughout this file.
k8s_client = K8sClient()
k8s_config = K8sConfig()

class KubernetesRetriableException(k8s_client.rest.ApiException):
    """ApiException wrapper marking an error as retriable for the Retry helper."""

    def __init__(self, orig: K8sClient.rest.ApiException) -> None:
        super(KubernetesRetriableException, self).__init__(orig.status, orig.reason)
        self.body = orig.body
        self.headers = orig.headers

    @property
    def sleeptime(self) -> Optional[int]:
        # Honor the server-provided `retry-after` header when present; any
        # parsing problem simply means "no hint available".
        try:
            return int((self.headers or {}).get('retry-after', ''))
        except Exception:
            return None

class CoreV1ApiProxy(object):
    """Proxy class to work with k8s_client.CoreV1Api() object"""

    # HTTP status codes that are always treated as retriable.
    _DEFAULT_RETRIABLE_HTTP_CODES = frozenset([500, 503, 504])

    def __init__(self, use_endpoints: Optional[bool] = False, bypass_api_service: Optional[bool] = False) -> None:
        self._api_client = k8s_client.ApiClient(bypass_api_service)
        self._core_v1_api = k8s_client.CoreV1Api(self._api_client)
        self._use_endpoints = bool(use_endpoints)
        self._retriable_http_codes = set(self._DEFAULT_RETRIABLE_HTTP_CODES)

    def configure_timeouts(self, loop_wait: int, retry_timeout: Union[int, float], ttl: int) -> None:
        # Normally every loop_wait seconds we should have receive something from the socket.
    # --- Continuation of CoreV1ApiProxy.configure_timeouts() (its `def` line and the
    # --- first comment line precede this chunk).
    # If we didn't received anything after the loop_wait + retry_timeout it is a time
    # to start worrying (send keepalive messages). Finally, the connection should be
    # considered as dead if we received nothing from the socket after the ttl seconds.
    self._api_client.pool_manager.connection_pool_kw['socket_options'] = \
        list(keepalive_socket_options(ttl, int(loop_wait + retry_timeout)))
    self._api_client.set_read_timeout(retry_timeout)
    self._api_client.set_api_servers_cache_ttl(loop_wait)

    def configure_retriable_http_codes(self, retriable_http_codes: List[int]) -> None:
        # Extend (never shrink) the default set of retriable status codes.
        self._retriable_http_codes = self._DEFAULT_RETRIABLE_HTTP_CODES | set(retriable_http_codes)

    def refresh_api_servers_cache(self) -> None:
        self._api_client.refresh_api_servers_cache()

    def __getattr__(self, func: str) -> Callable[..., Any]:
        """Intercepts calls to `CoreV1Api` methods.

        Handles two important cases:
        1. Depending on whether Patroni is configured to work with `ConfigMaps` or
           `Endpoints` it remaps "virtual" method names from `*_kind` to
           `*_endpoints` or `*_config_map`.
        2. It handles HTTP error codes and raises `KubernetesRetriableException`
           if the given error is supposed to be handled with retry."""
        if func.endswith('_kind'):
            func = func[:-4] + ('endpoints' if self._use_endpoints else 'config_map')

        def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                return getattr(self._core_v1_api, func)(*args, **kwargs)
            except k8s_client.rest.ApiException as e:
                # Retry on configured status codes or when the server sent `retry-after`.
                if e.status in self._retriable_http_codes or e.headers and 'retry-after' in e.headers:
                    raise KubernetesRetriableException(e)
                raise
        return wrapper

    @property
    def use_endpoints(self) -> bool:
        return self._use_endpoints

def _run_and_handle_exceptions(method: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
    # Translate Kubernetes/retry errors into the DCS error model:
    # * 403 and other non-409 API errors are logged and reported as False;
    # * 409 (object exists / resource_version conflict) silently returns False;
    # * retry/K8s infrastructure failures become KubernetesError.
    try:
        return method(*args, **kwargs)
    except k8s_client.rest.ApiException as e:
        if e.status == 403:
            logger.exception('Permission denied')
        elif e.status != 409:  # Object exists or conflict in resource_version
            logger.exception('Unexpected error from Kubernetes API')
        return False
    except (RetryFailedError, K8sException) as e:
        raise KubernetesError(e)

def catch_kubernetes_errors(func: Callable[..., Any]) -> Callable[..., Any]:
    # Decorator: run *func* through _run_and_handle_exceptions and turn a
    # KubernetesError into a plain False return value.
    # NOTE(review): wrapper lacks functools.wraps(func) -- decorated methods lose
    # their __name__/__doc__; consider adding it (functools is already imported).
    def wrapper(self: 'Kubernetes', *args: Any, **kwargs: Any) -> Any:
        try:
            return _run_and_handle_exceptions(func, self, *args, **kwargs)
        except KubernetesError:
            return False
    return wrapper

class ObjectCache(Thread):
    """Background thread keeping a local cache of K8s objects up to date via
    the LIST+WATCH API; wakes the HA loop when relevant annotations change."""

    def __init__(self, dcs: 'Kubernetes', func: Callable[..., Any], retry: Retry,
                 condition: Condition, name: Optional[str] = None) -> None:
        super(ObjectCache, self).__init__()
        self.daemon = True
        self._dcs = dcs
        self._func = func  # the list_namespaced_* call this cache watches
        self._retry = retry
        self._condition = condition
        self._name = name  # name of this pod
        self._is_ready = False
        self._response: Union[urllib3.HTTPResponse, bool, None] = None  # needs to be accessible from the `kill_stream`
        self._response_lock = Lock()  # protect the `self._response` from concurrent access
        self._object_cache: Dict[str, K8sObject] = {}
        self._object_cache_lock = Lock()
        self._annotations_map = {self._dcs.leader_path:
# --- Continuation of ObjectCache.__init__ (the `_annotations_map` literal is cut
# --- at the chunk boundary above).
getattr(self._dcs, '_LEADER'), self._dcs.config_path: getattr(self._dcs, '_CONFIG')}  # pyright
self.start()

    def _list(self) -> K8sObject:
        # Full LIST of watched objects; pause briefly before propagating errors
        # so a permanently failing API does not turn run() into a busy loop.
        try:
            return self._func(_retry=self._retry.copy())
        except Exception:
            time.sleep(1)
            raise

    def _watch(self, resource_version: str) -> urllib3.HTTPResponse:
        # Open a streaming WATCH resuming right after *resource_version*.
        return self._func(_request_timeout=(self._retry.deadline, urllib3.Timeout.DEFAULT_TIMEOUT),
                          _preload_content=False, watch=True, resource_version=resource_version)

    def set(self, name: str, value: K8sObject) -> Tuple[bool, Optional[K8sObject]]:
        """Store *value* unless a newer resource_version is already cached.

        :returns: tuple of (cache was updated, previously cached object)."""
        with self._object_cache_lock:
            old_value = self._object_cache.get(name)
            ret = not old_value or int(old_value.metadata.resource_version) < int(value.metadata.resource_version)
            if ret:
                self._object_cache[name] = value
        return ret, old_value

    def delete(self, name: str, resource_version: str) -> Tuple[bool, Optional[K8sObject]]:
        """Drop *name* from the cache if the cached copy is older than *resource_version*.

        :returns: tuple of (nothing stale remains cached, previously cached object)."""
        with self._object_cache_lock:
            old_value = self._object_cache.get(name)
            ret = old_value and int(old_value.metadata.resource_version) < int(resource_version)
            if ret:
                del self._object_cache[name]
        return bool(not old_value or ret), old_value

    def copy(self) -> Dict[str, K8sObject]:
        # Shallow snapshot taken under the lock.
        with self._object_cache_lock:
            return self._object_cache.copy()

    def get(self, name: str) -> Optional[K8sObject]:
        with self._object_cache_lock:
            return self._object_cache.get(name)

    def _process_event(self, event: Dict[str, Union[Any, Dict[str, Union[Any, Dict[str, Any]]]]]) -> None:
        # Apply one watch event to the cache and decide whether to wake the HA loop.
        ev_type = event['type']
        obj = event['object']
        name = obj['metadata']['name']

        new_value = None
        if ev_type in ('ADDED', 'MODIFIED'):
            obj = K8sObject(obj)
            success, old_value = self.set(name, obj)
            if success:
                new_value = (obj.metadata.annotations or {}).get(self._annotations_map.get(name))
        elif ev_type == 'DELETED':
            success, old_value = self.delete(name, obj['metadata']['resourceVersion'])
        else:
            return logger.warning('Unexpected event type: %s', ev_type)

        if success and obj.get('kind') != 'Pod':
            if old_value:
                old_value = (old_value.metadata.annotations or {}).get(self._annotations_map.get(name))

            value_changed = old_value != new_value and \
                (name != self._dcs.config_path or old_value is not None and new_value is not None)

            if value_changed:
                logger.debug('%s changed from %s to %s', name, old_value, new_value)

            # Do not wake up HA loop if we run as leader and received leader object update event
            if value_changed or name == self._dcs.leader_path and self._name != new_value:
                self._dcs.event.set()

    @staticmethod
    def _finish_response(response: urllib3.HTTPResponse) -> None:
        # Return the connection to the pool even if close() raises.
        try:
            response.close()
        finally:
            response.release_conn()

    def _do_watch(self, resource_version: str) -> None:
        # Run one watch session; kill_stream() may cancel it concurrently via
        # self._response, hence the locked hand-off below.
        with self._response_lock:
            self._response = None
        response = self._watch(resource_version)
        with self._response_lock:
            if self._response is None:
                self._response = response
        if not self._response:
            # kill_stream() raced us and set the `False` marker: abort.
            return self._finish_response(response)

        for event in iter_response_objects(response):
            if event['object'].get('code') == 410:
                # 410 Gone: our resource_version is too old, rebuild the cache.
                break
            self._process_event(event)

    def _build_cache(self) -> None:
        # LIST once to seed the cache, signal readiness, then WATCH for updates
        # until the stream breaks; always clear the readiness flag and response.
        objects = self._list()
        with self._object_cache_lock:
            self._object_cache = {item.metadata.name: item for item in objects.items}
        with self._condition:
            self._is_ready = True
            self._condition.notify()

        try:
            self._do_watch(objects.metadata.resource_version)
        finally:
            with self._condition:
                self._is_ready = False
            with self._response_lock:
                response, self._response = self._response, None
            if isinstance(response, urllib3.HTTPResponse):
                self._finish_response(response)

    def kill_stream(self) -> None:
        # Abort an in-flight watch by shutting its socket down; if no response
        # is registered yet, leave a `False` marker so _do_watch() aborts early.
        sock = None
        with self._response_lock:
            if isinstance(self._response, urllib3.HTTPResponse):
                try:
                    sock = self._response.connection.sock if self._response.connection else None
                except Exception:
                    sock = None
            else:
                self._response = False
        if sock:
            try:
                sock.shutdown(socket.SHUT_RDWR)
                sock.close()
            except Exception as e:
                logger.debug('Error on socket.shutdown: %r', e)

    def run(self) -> None:
        # Thread main loop: rebuild the cache forever, logging any failure.
        while True:
            try:
                self._build_cache()
            except Exception as e:
                logger.error('ObjectCache.run %r', e)

    def is_ready(self) ->
bool:
        # --- Continuation of ObjectCache.is_ready() (the `def` line is cut above).
        """Must be called only when holding the lock on `_condition`"""
        return self._is_ready

class Kubernetes(AbstractDCS):
    """Kubernetes-based DCS: cluster state lives in ConfigMap/Endpoints
    annotations, member state in Pod annotations."""

    _CITUS_LABEL = 'citus-group'

    def __init__(self, config: Dict[str, Any]) -> None:
        self._labels = deepcopy(config['labels'])
        self._labels[config.get('scope_label', 'cluster-name')] = config['scope']
        self._label_selector = ','.join('{0}={1}'.format(k, v) for k, v in self._labels.items())
        self._namespace = config.get('namespace') or 'default'
        self._role_label = config.get('role_label', 'role')
        self._leader_label_value = config.get('leader_label_value', 'master')
        self._follower_label_value = config.get('follower_label_value', 'replica')
        self._standby_leader_label_value = config.get('standby_leader_label_value', 'master')
        self._tmp_role_label = config.get('tmp_role_label')
        self._ca_certs = os.environ.get('PATRONI_KUBERNETES_CACERT', config.get('cacert')) or SERVICE_CERT_FILENAME
        super(Kubernetes, self).__init__({**config, 'namespace': ''})
        if self._citus_group:
            self._labels[self._CITUS_LABEL] = self._citus_group
        self._retry = Retry(deadline=config['retry_timeout'], max_delay=1, max_tries=-1,
                            retry_exceptions=KubernetesRetriableException)
        self._ttl = int(config.get('ttl') or 30)
        try:
            k8s_config.load_incluster_config(ca_certs=self._ca_certs)
        except k8s_config.ConfigException:
            # NOTE(review): the 'kind-kind' fallback context looks like a development
            # artifact (default context name of a `kind` cluster) -- confirm against
            # upstream whether plain config.get('context') was intended.
            k8s_config.load_kube_config(context=config.get('context', 'kind-kind'))

        # When running as patronictl we do not advertise our own pod IP.
        self.__ips: List[str] = [] if self._ctl else [config.get('pod_ip', '')]
        self.__ports: List[K8sObject] = []
        ports: List[Dict[str, Any]] = config.get('ports', [{}])
        for p in ports:
            port: Dict[str, Any] = {'port': int(p.get('port', '5432'))}
            port.update({n: p[n] for n in ('name', 'protocol') if p.get(n)})
            self.__ports.append(k8s_client.V1EndpointPort(**port))

        bypass_api_service = not self._ctl and config.get('bypass_api_service')
        self._api = CoreV1ApiProxy(config.get('use_endpoints'), bypass_api_service)
        self._should_create_config_service = self._api.use_endpoints
        self.reload_config(config)
        # leader_observed_record, leader_resource_version, and leader_observed_time are used only for leader race!
        self._leader_observed_record: Dict[str, str] = {}
        self._leader_observed_time = None
        self._leader_resource_version = None
        self.__do_not_watch = False

        self._condition = Condition()

        pods_func = functools.partial(self._api.list_namespaced_pod, self._namespace,
                                      label_selector=self._label_selector)
        self._pods = ObjectCache(self, pods_func, self._retry, self._condition)

        kinds_func = functools.partial(self._api.list_namespaced_kind, self._namespace,
                                       label_selector=self._label_selector)
        self._kinds = ObjectCache(self, kinds_func, self._retry, self._condition, self._name)

    def retry(self, method: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
        # Run *method* under a fresh copy of the retry object (thread safety).
        retry = self._retry.copy()
        kwargs['_retry'] = retry
        return retry(method, *args, **kwargs)

    def client_path(self, path: str) -> str:
        # K8s object names cannot contain '/', so flatten the DCS path to dashes.
        return super(Kubernetes, self).client_path(path)[1:].replace('/', '-')

    @property
    def leader_path(self) -> str:
        # With Endpoints the leader object is the scope itself (strip '-leader').
        return super(Kubernetes, self).leader_path[:-7 if self._api.use_endpoints else None]

    def set_ttl(self, ttl: int) -> Optional[bool]:
        ttl = int(ttl)
        # A TTL change requires re-establishing the watch with new timeouts.
        self.__do_not_watch = self._ttl != ttl
        self._ttl = ttl
        return None

    @property
    def ttl(self) -> int:
        return self._ttl

    def set_retry_timeout(self, retry_timeout: int) -> None:
        self._retry.deadline = retry_timeout

    def reload_config(self, config: Union['Config', Dict[str, Any]]) -> None:
        """Handles dynamic config changes.

        Either cause by changes in the local configuration file + SIGHUP or by changes of dynamic configuration"""
        super(Kubernetes, self).reload_config(config)
        if TYPE_CHECKING:  # pragma: no cover
            assert self._retry.deadline is not None
        self._api.configure_timeouts(self.loop_wait, self._retry.deadline, self.ttl)

        # retriable_http_codes supposed to be either int, list of integers or comma-separated string with integers.
        # --- Continuation of Kubernetes.reload_config() (cut at the chunk boundary).
        retriable_http_codes: Union[str, List[Union[str, int]]] = config.get('retriable_http_codes', [])
        if not isinstance(retriable_http_codes, list):
            retriable_http_codes = [c.strip() for c in str(retriable_http_codes).split(',')]

        try:
            self._api.configure_retriable_http_codes([int(c) for c in retriable_http_codes])
        except Exception as e:
            logger.warning('Invalid value of retriable_http_codes = %s: %r', config['retriable_http_codes'], e)

    @staticmethod
    def member(pod: K8sObject) -> Member:
        # Build a Member from a Pod: the `status` annotation carries the JSON
        # member data; pod labels are kept for the touch_member comparison.
        annotations = pod.metadata.annotations or {}
        member = Member.from_node(pod.metadata.resource_version, pod.metadata.name, None, annotations.get('status', ''))
        member.data['pod_labels'] = pod.metadata.labels
        return member

    def _wait_caches(self, stop_time: float) -> None:
        # Block until both object caches are primed or the deadline expires.
        # Must be called while holding self._condition.
        while not (self._pods.is_ready() and self._kinds.is_ready()):
            timeout = stop_time - time.time()
            if timeout <= 0:
                raise RetryFailedError('Exceeded retry deadline')
            self._condition.wait(timeout)

    def _cluster_from_nodes(self, group: str, nodes: Dict[str, K8sObject], pods: Collection[K8sObject]) -> Cluster:
        # Assemble a Cluster view for one (Citus) group from cached K8s objects.
        members = [self.member(pod) for pod in pods]

        path = self._base_path[1:] + '-'
        if group:
            path += group + '-'

        config = nodes.get(path + self._CONFIG)
        metadata = config and config.metadata
        annotations = metadata and metadata.annotations or {}

        # get initialize flag
        initialize = annotations.get(self._INITIALIZE)

        # get global dynamic configuration
        config = metadata and ClusterConfig.from_node(metadata.resource_version,
                                                      annotations.get(self._CONFIG) or '{}',
                                                      metadata.resource_version if self._CONFIG in annotations else 0)

        # get timeline history
        history = metadata and TimelineHistory.from_node(metadata.resource_version,
                                                         annotations.get(self._HISTORY) or '[]')

        leader_path = path[:-1] if self._api.use_endpoints else path + self._LEADER
        leader = nodes.get(leader_path)
        metadata = leader and leader.metadata
        if leader_path == self.leader_path:  # We want to memorize leader_resource_version only for our cluster
            self._leader_resource_version = metadata.resource_version if metadata else None

        annotations: Dict[str, str] = metadata and metadata.annotations or {}

        # get last known leader lsn and slots
        status = Status.from_node(annotations)

        # get failsafe topology
        try:
            failsafe = json.loads(annotations.get(self._FAILSAFE, ''))
        except Exception:
            failsafe = None

        # get leader
        leader_record: Dict[str, str] = {n: annotations[n] for n in
                                         (self._LEADER, 'acquireTime', 'ttl', 'renewTime', 'transitions')
                                         if n in annotations}
        # We want to memorize leader_observed_record and update leader_observed_time only for our cluster
        if leader_path == self.leader_path and (leader_record or self._leader_observed_record)\
                and leader_record != self._leader_observed_record:
            self._leader_observed_record = leader_record
            self._leader_observed_time = time.time()

        leader = leader_record.get(self._LEADER)
        try:
            ttl = int(leader_record.get('ttl', self._ttl)) or self._ttl
        except (TypeError, ValueError):
            ttl = self._ttl

        # We want to check validity of the leader record only for our own cluster
        if leader_path == self.leader_path and\
                not (metadata and self._leader_observed_time and self._leader_observed_time + ttl >= time.time()):
            leader = None

        if metadata:
            # Wrap the leader name into a Member (real one when known, else a stub).
            member = Member(-1, leader or '', None, {})
            member = ([m for m in members if m.name == leader] or [member])[0]
            leader = Leader(metadata.resource_version, None, member)
        else:
            leader = None

        # failover key
        failover = nodes.get(path + self._FAILOVER)
        metadata = failover and failover.metadata
        failover = metadata and Failover.from_node(metadata.resource_version, (metadata.annotations or {}).copy())

        # get synchronization state
        sync = nodes.get(path + self._SYNC)
        metadata = sync and sync.metadata
        sync = SyncState.from_node(metadata and metadata.resource_version, metadata and metadata.annotations)

        return Cluster(initialize, config, leader, status, members, failover, sync, history, failsafe)

    def _cluster_loader(self, path: Dict[str, Any]) -> Cluster:
        return self._cluster_from_nodes(path['group'],
# --- Continuation of Kubernetes._cluster_loader() (cut at the chunk boundary).
path['nodes'], path['pods'].values())

    def _citus_cluster_loader(self, path: Dict[str, Any]) -> Dict[int, Cluster]:
        # Bucket cached pods/kinds by their Citus group label and build one
        # Cluster per group.
        clusters: Dict[str, Dict[str, Dict[str, K8sObject]]] = defaultdict(lambda: defaultdict(dict))
        for name, pod in path['pods'].items():
            group = pod.metadata.labels.get(self._CITUS_LABEL)
            if group and citus_group_re.match(group):
                clusters[group]['pods'][name] = pod
        for name, kind in path['nodes'].items():
            group = kind.metadata.labels.get(self._CITUS_LABEL)
            if group and citus_group_re.match(group):
                clusters[group]['nodes'][name] = kind
        return {int(group): self._cluster_from_nodes(group, value['nodes'], value['pods'].values())
                for group, value in clusters.items()}

    def __load_cluster(
            self, group: Optional[str], loader: Callable[[Dict[str, Any]], Union[Cluster, Dict[int, Cluster]]]
    ) -> Union[Cluster, Dict[int, Cluster]]:
        # Wait for both caches and run *loader* over a (group-filtered) snapshot.
        if TYPE_CHECKING:  # pragma: no cover
            assert self._retry.deadline is not None
        stop_time = time.time() + self._retry.deadline
        self._api.refresh_api_servers_cache()
        try:
            with self._condition:
                self._wait_caches(stop_time)

                pods = {name: pod for name, pod in self._pods.copy().items()
                        if not group or pod.metadata.labels.get(self._CITUS_LABEL) == group}

                nodes = {name: kind for name, kind in self._kinds.copy().items()
                         if not group or kind.metadata.labels.get(self._CITUS_LABEL) == group}
            return loader({'group': group, 'pods': pods, 'nodes': nodes})
        except Exception:
            logger.exception('get_cluster')
            raise KubernetesError('Kubernetes API is not responding properly')

    def _load_cluster(
            self, path: str, loader: Callable[[Any], Union[Cluster, Dict[int, Cluster]]]
    ) -> Union[Cluster, Dict[int, Cluster]]:
        group = self._citus_group if path == self.client_path('') else None
        return self.__load_cluster(group, loader)

    def get_citus_coordinator(self) -> Optional[Cluster]:
        # Returns None (implicitly) when loading fails.
        try:
            ret = self.__load_cluster(str(CITUS_COORDINATOR_GROUP_ID), self._cluster_loader)
            if TYPE_CHECKING:  # pragma: no cover
                assert isinstance(ret, Cluster)
            return ret
        except Exception as e:
            logger.error('Failed to load Citus coordinator cluster from Kubernetes: %r', e)

    @staticmethod
    def compare_ports(p1: K8sObject, p2: K8sObject) -> bool:
        # Ports are equal when name, number and protocol (default 'TCP') match.
        return p1.name == p2.name and p1.port == p2.port and (p1.protocol or 'TCP') == (p2.protocol or 'TCP')

    @staticmethod
    def subsets_changed(last_observed_subsets: List[K8sObject], ip: str, ports: List[K8sObject]) -> bool:
        """Decide whether the leader Endpoints subsets need to be rewritten.

        >>> ip = '1.2.3.4'
        >>> a = [k8s_client.V1EndpointAddress(ip=ip)]
        >>> s = [k8s_client.V1EndpointSubset(addresses=a)]
        >>> Kubernetes.subsets_changed(s, '1.2.3.5', [])
        True
        >>> s = [k8s_client.V1EndpointSubset(addresses=a, ports=[k8s_client.V1EndpointPort(protocol='TCP', port=1)])]
        >>> Kubernetes.subsets_changed(s, '1.2.3.4', [k8s_client.V1EndpointPort(port=5432)])
        True
        >>> p1 = k8s_client.V1EndpointPort(name='port1', port=1)
        >>> p2 = k8s_client.V1EndpointPort(name='port2', port=2)
        >>> p3 = k8s_client.V1EndpointPort(name='port3', port=3)
        >>> s = [k8s_client.V1EndpointSubset(addresses=a, ports=[p1, p2])]
        >>> Kubernetes.subsets_changed(s, ip, [p2, p3])
        True
        >>> s2 = [k8s_client.V1EndpointSubset(addresses=a, ports=[p2, p1])]
        >>> Kubernetes.subsets_changed(s, ip, [p2, p1])
        False
        """
        # NOTE(review): the last doctest builds `s2` but then tests `s` -- the final
        # call was presumably meant to use `s2` (result is False either way); confirm
        # against upstream before changing.
        if len(last_observed_subsets) != 1:
            return True
        if len(last_observed_subsets[0].addresses or []) != 1 or \
                last_observed_subsets[0].addresses[0].ip != ip or \
                len(last_observed_subsets[0].ports) != len(ports):
            return True
        if len(ports) == 1:
            return not Kubernetes.compare_ports(last_observed_subsets[0].ports[0], ports[0])
        observed_ports = {p.name: p for p in last_observed_subsets[0].ports}
        for p in ports:
            if p.name not in observed_ports or not Kubernetes.compare_ports(p, observed_ports.pop(p.name)):
                return True
        return False

    def __target_ref(self, leader_ip: str, latest_subsets: List[K8sObject], pod: K8sObject) -> K8sObject:
        # we want to re-use existing target_ref if possible
        for subset in latest_subsets:
            for address in subset.addresses or []:
                if address.ip == leader_ip and address.target_ref and address.target_ref.name ==
# --- Continuation of Kubernetes.__target_ref() (the condition is cut at the chunk
# --- boundary above).
self._name:
                    return address.target_ref
        return k8s_client.V1ObjectReference(kind='Pod', uid=pod.metadata.uid, namespace=self._namespace,
                                            name=self._name, resource_version=pod.metadata.resource_version)

    def _map_subsets(self, endpoints: Dict[str, Any], ips: List[str]) -> None:
        # Compute the desired `subsets` value for the leader Endpoints object.
        leader = self._kinds.get(self.leader_path)
        latest_subsets = leader and leader.subsets or []
        if not ips:
            # We want to have subsets empty
            if latest_subsets:
                endpoints['subsets'] = []
            return

        pod = self._pods.get(self._name)
        leader_ip = ips[0] or pod and pod.status.pod_ip
        # don't touch subsets if our (leader) ip is unknown or subsets is valid
        if leader_ip and self.subsets_changed(latest_subsets, leader_ip, self.__ports):
            kwargs = {'hostname': pod.spec.hostname, 'node_name': pod.spec.node_name,
                      'target_ref': self.__target_ref(leader_ip, latest_subsets, pod)} if pod else {}
            address = k8s_client.V1EndpointAddress(ip=leader_ip, **kwargs)
            endpoints['subsets'] = [k8s_client.V1EndpointSubset(addresses=[address], ports=self.__ports)]

    def _patch_or_create(self, name: str, annotations: Dict[str, Any], resource_version: Optional[str] = None,
                         patch: bool = False, retry: Optional[Callable[..., Any]] = None,
                         ips: Optional[List[str]] = None) -> K8sObject:
        """Patch or create K8s object, Endpoint or ConfigMap.

        :param name: the name of the object.
        :param annotations: mapping of annotations that we want to create/update.
        :param resource_version: object should be updated only if the ``resource_version`` matches provided value.
        :param patch: ``True`` if we know in advance that the object already exists and we should patch it.
        :param retry: a callable that will take care of retries
        :param ips: IP address that we want to put to the subsets of the endpoint. Could have following values:

                    * ``None`` - when we don't need to touch subset;
                    * ``[]`` - to set subsets to the empty list, when :meth:`delete_leader` method is called;
                    * ``['ip.add.re.ss']`` - when we want to make sure that the subsets of the leader endpoint
                      contains the IP address of the leader, that we get from the ``kubernetes.pod_ip``;
                    * ``['']`` - when we want to make sure that the subsets of the leader endpoint contains
                      the IP address of the leader, but ``kubernetes.pod_ip`` configuration is missing.
                      In this case we will try to take the IP address of the Pod which name matches ``name``
                      from the config file.

        :returns: the new :class:`V1Endpoints` or :class:`V1ConfigMap` object, that was created or updated.
        """
        metadata = {'namespace': self._namespace, 'name': name, 'labels': self._labels, 'annotations': annotations}
        if patch or resource_version:
            if resource_version is not None:
                metadata['resource_version'] = resource_version
            func = functools.partial(self._api.patch_namespaced_kind, name)
            # NOTE(review): this assignment is a no-op ('annotations' is already set
            # in the literal above) -- confirm against upstream whether it is intended.
            metadata['annotations'] = annotations
        else:
            func = functools.partial(self._api.create_namespaced_kind)
            # skip annotations with null values
            metadata['annotations'] = {k: v for k, v in annotations.items() if v is not None}

        metadata = k8s_client.V1ObjectMeta(**metadata)
        if self._api.use_endpoints:
            endpoints = {'metadata': metadata}
            if ips is not None:
                self._map_subsets(endpoints, ips)
            body = k8s_client.V1Endpoints(**endpoints)
        else:
            body = k8s_client.V1ConfigMap(metadata=metadata)
        ret = retry(func, self._namespace, body) if retry else func(self._namespace, body)
        if ret:
            # Push the fresh object into the async cache immediately.
            self._kinds.set(name, ret)
        return ret

    @catch_kubernetes_errors
    def patch_or_create(self, name: str, annotations: Dict[str, Any], resource_version: Optional[str] = None,
                        patch: bool = False, retry: bool = True, ips: Optional[List[str]] = None) -> K8sObject:
        try:
            return self._patch_or_create(name, annotations, resource_version, patch, self.retry if retry else None, ips)
        except k8s_client.rest.ApiException as e:
            if
# --- Continuation of Kubernetes.patch_or_create() (the `if` condition is cut at
# --- the chunk boundary above).
e.status == 409 and resource_version:  # Conflict in resource_version
                # Terminate watchers, it could be a sign that K8s API is in a failed state
                self._kinds.kill_stream()
                self._pods.kill_stream()
            raise e

    def patch_or_create_config(self, annotations: Dict[str, Any], resource_version: Optional[str] = None,
                               patch: bool = False, retry: bool = True) -> bool:
        # SCOPE-config endpoint requires corresponding service otherwise it might be "cleaned" by k8s master
        if self._api.use_endpoints and not patch and not resource_version:
            self._should_create_config_service = True
            self._create_config_service()
        return bool(self.patch_or_create(self.config_path, annotations, resource_version, patch, retry))

    def _create_config_service(self) -> None:
        # Create the headless service backing the SCOPE-config Endpoints object.
        metadata = k8s_client.V1ObjectMeta(namespace=self._namespace, name=self.config_path, labels=self._labels)
        body = k8s_client.V1Service(metadata=metadata, spec=k8s_client.V1ServiceSpec(cluster_ip='None'))
        try:
            if not self._api.create_namespaced_service(self._namespace, body):
                return
        except Exception as e:
            # 409 - service already exists, 403 - creation forbidden
            if not isinstance(e, k8s_client.rest.ApiException) or e.status not in (409, 403):
                return logger.exception('create_config_service failed')
        self._should_create_config_service = False

    def _write_leader_optime(self, last_lsn: str) -> bool:
        """Unused"""
        raise NotImplementedError  # pragma: no cover

    def _write_status(self, value: str) -> bool:
        """Unused"""
        raise NotImplementedError  # pragma: no cover

    def _write_failsafe(self, value: str) -> bool:
        """Unused"""
        raise NotImplementedError  # pragma: no cover

    def _update_leader(self, leader: Leader) -> bool:
        """Unused"""
        raise NotImplementedError  # pragma: no cover

    def write_leader_optime(self, last_lsn: int) -> None:
        """Write value for WAL LSN to ``optime`` annotation of the leader object.

        :param last_lsn: absolute WAL LSN in bytes.
        """
        self.patch_or_create(self.leader_path, {self._OPTIME: str(last_lsn)}, patch=True, retry=False)

    def _update_leader_with_retry(self, annotations: Dict[str, Any], resource_version: Optional[str],
                                  ips: List[str]) -> bool:
        # Try to update the leader object; on a 409 conflict re-read the object
        # straight from the API (not the async cache) and retry once more while
        # the retry deadline still allows it.
        retry = self._retry.copy()

        def _retry(*args: Any, **kwargs: Any) -> Any:
            kwargs['_retry'] = retry
            return retry(*args, **kwargs)

        try:
            return bool(self._patch_or_create(self.leader_path, annotations, resource_version, ips=ips, retry=_retry))
        except k8s_client.rest.ApiException as e:
            if e.status == 409:
                logger.warning('Concurrent update of %s', self.leader_path)
            else:
                logger.exception('Permission denied' if e.status == 403 else 'Unexpected error from Kubernetes API')
                return False
        except (RetryFailedError, K8sException) as e:
            raise KubernetesError(e)

        # if we are here, that means update failed with 409
        if not retry.ensure_deadline(1):
            return False  # No time for retry. Tell ha.py that we have to demote due to failed update.

        # Try to get the latest version directly from K8s API instead of relying on async cache
        try:
            kind = _retry(self._api.read_namespaced_kind, self.leader_path, self._namespace)
        except (RetryFailedError, K8sException) as e:
            raise KubernetesError(e)
        except Exception as e:
            logger.error('Failed to get the leader object "%s": %r', self.leader_path, e)
            return False

        self._kinds.set(self.leader_path, kind)

        if not retry.ensure_deadline(0.5):
            return False

        kind_annotations = kind and kind.metadata.annotations or {}
        kind_resource_version = kind and kind.metadata.resource_version

        # There is different leader or resource_version in cache didn't change
        if kind and (kind_annotations.get(self._LEADER) != self._name or kind_resource_version == resource_version):
            return False

        return bool(_run_and_handle_exceptions(self._patch_or_create, self.leader_path, annotations,
                                               kind_resource_version, ips=ips, retry=_retry))

    def update_leader(self, leader: Leader, last_lsn: Optional[int],
                      slots: Optional[Dict[str, int]] = None, failsafe: Optional[Dict[str, str]] =
None) -> bool:
        # --- Continuation of Kubernetes.update_leader() (its signature is cut at the
        # --- chunk boundary above). Give up early when the cached leader object is
        # --- not owned by this member.
        kind = self._kinds.get(self.leader_path)
        kind_annotations = kind and kind.metadata.annotations or {}

        if kind and kind_annotations.get(self._LEADER) != self._name:
            return False

        now = datetime.datetime.now(tzutc).isoformat()
        leader_observed_record = kind_annotations or self._leader_observed_record
        annotations = {self._LEADER: self._name, 'ttl': str(self._ttl), 'renewTime': now,
                       'acquireTime': leader_observed_record.get('acquireTime') or now,
                       'transitions': leader_observed_record.get('transitions') or '0'}
        if last_lsn:
            annotations[self._OPTIME] = str(last_lsn)
            annotations['slots'] = json.dumps(slots, separators=(',', ':')) if slots else None
        if failsafe is not None:
            annotations[self._FAILSAFE] = json.dumps(failsafe, separators=(',', ':')) if failsafe else None

        resource_version = kind and kind.metadata.resource_version
        return self._update_leader_with_retry(annotations, resource_version, self.__ips)

    def attempt_to_acquire_leader(self) -> bool:
        # Compete for the leader lock: bump `transitions` when leadership moves,
        # keep the old acquireTime when we are re-acquiring our own lock.
        now = datetime.datetime.now(tzutc).isoformat()
        annotations = {self._LEADER: self._name, 'ttl': str(self._ttl),
                       'renewTime': now, 'acquireTime': now, 'transitions': '0'}
        if self._leader_observed_record:
            try:
                transitions = int(self._leader_observed_record.get('transitions', ''))
            except (TypeError, ValueError):
                transitions = 0

            if self._leader_observed_record.get(self._LEADER) != self._name:
                transitions += 1
            else:
                annotations['acquireTime'] = self._leader_observed_record.get('acquireTime') or now
            annotations['transitions'] = str(transitions)
        try:
            ret = bool(self._patch_or_create(self.leader_path, annotations,
                                             self._leader_resource_version, retry=self.retry, ips=self.__ips))
        except k8s_client.rest.ApiException as e:
            if e.status == 409 and self._leader_resource_version:  # Conflict in resource_version
                # Terminate watchers, it could be a sign that K8s API is in a failed state
                self._kinds.kill_stream()
                self._pods.kill_stream()
            ret = False
        except (RetryFailedError, K8sException) as e:
            raise KubernetesError(e)
        if not ret:
            logger.info('Could not take out TTL lock')
        return ret

    def take_leader(self) -> bool:
        return self.attempt_to_acquire_leader()

    def set_failover_value(self, value: str, version: Optional[str] = None) -> bool:
        """Unused"""
        raise NotImplementedError  # pragma: no cover

    def manual_failover(self, leader: Optional[str], candidate: Optional[str],
                        scheduled_at: Optional[datetime.datetime] = None, version: Optional[str] = None) -> bool:
        annotations = {'leader': leader or None, 'member': candidate or None,
                       'scheduled_at': scheduled_at and scheduled_at.isoformat()}
        patch = bool(self.cluster and isinstance(self.cluster.failover, Failover) and self.cluster.failover.version)
        return bool(self.patch_or_create(self.failover_path, annotations, version, bool(version or patch), False))

    @property
    def _config_resource_version(self) -> Optional[str]:
        config = self._kinds.get(self.config_path)
        return config and config.metadata.resource_version

    def set_config_value(self, value: str, version: Optional[str] = None) -> bool:
        return self.patch_or_create_config({self._CONFIG: value}, version, bool(self._config_resource_version), False)

    @catch_kubernetes_errors
    def touch_member(self, data: Dict[str, Any]) -> bool:
        # Update our Pod's role labels and `status` annotation; skip the API
        # call when they already match the desired state.
        cluster = self.cluster
        if cluster and cluster.leader and cluster.leader.name == self._name:
            role = self._standby_leader_label_value if data['role'] == 'standby_leader' else self._leader_label_value
            tmp_role = 'master'
        elif data['state'] == 'running' and data['role'] not in ('master', 'primary'):
            role = {'replica': self._follower_label_value}.get(data['role'], data['role'])
            tmp_role = data['role']
        else:
            role = None
            tmp_role = None

        role_labels = {self._role_label: role}
        if self._tmp_role_label:
            role_labels[self._tmp_role_label] = tmp_role

        member = cluster and cluster.get_member(self._name, fallback_to_leader=False)
        pod_labels = member and member.data.pop('pod_labels', None)
        ret = member and pod_labels is not None\
            and all(pod_labels.get(k) == v for k, v in role_labels.items())\
            and deep_compare(data, member.data)

        if not ret:
            metadata = {'namespace': self._namespace, 'name': self._name, 'labels': role_labels,
                        'annotations': {'status': json.dumps(data, separators=(',', ':'))}}
            body = k8s_client.V1Pod(metadata=k8s_client.V1ObjectMeta(**metadata))
            ret = self._api.patch_namespaced_pod(self._name, self._namespace, body)
            if ret:
                self._pods.set(self._name, ret)
        if self._should_create_config_service:
            self._create_config_service()
        return bool(ret)

    def initialize(self, create_new: bool = True, sysid: str = "") -> bool:
        cluster = self.cluster
        resource_version = str(cluster.config.version)\
            if cluster and cluster.config and cluster.config.version else None
        return self.patch_or_create_config({self._INITIALIZE: sysid}, resource_version)

    def _delete_leader(self, leader: Leader) -> bool:
        """Unused"""
        raise NotImplementedError  # pragma: no cover

    def delete_leader(self, leader: Optional[Leader], last_lsn: Optional[int] = None) -> bool:
        # Clear the leader annotation (and Endpoints subsets) only if we own it.
        ret = False
        kind = self._kinds.get(self.leader_path)
        if kind and (kind.metadata.annotations or {}).get(self._LEADER) == self._name:
            annotations: Dict[str, Optional[str]] = {self._LEADER: None}
            if last_lsn:
                annotations[self._OPTIME] = str(last_lsn)
            ret = self.patch_or_create(self.leader_path, annotations, kind.metadata.resource_version, True, False, [])
        self.reset_cluster()
        return ret

    def cancel_initialization(self) -> bool:
        return self.patch_or_create_config({self._INITIALIZE: None}, None, True)

    @catch_kubernetes_errors
    def delete_cluster(self) -> bool:
        return bool(self.retry(self._api.delete_collection_namespaced_kind,
                               self._namespace, label_selector=self._label_selector))

    def set_history_value(self, value: str) -> bool:
        return self.patch_or_create_config({self._HISTORY: value}, None, bool(self._config_resource_version), False)

    def set_sync_state_value(self, value: str, version: Optional[str] = None) -> bool:
        """Unused"""
        raise NotImplementedError  # pragma: no cover

    def write_sync_state(self, leader: Optional[str], sync_standby:
Optional[Collection[str]], version: Optional[str] = None) -> Optional[SyncState]: """Prepare and write annotations to $SCOPE-sync Endpoint or ConfigMap. :param leader: name of the leader node that manages /sync key :param sync_standby: collection of currently known synchronous standby node names :param version: last known `resource_version` for conditional update of the object :returns: the new :class:`SyncState` object or None """ sync_state = self.sync_state(leader, sync_standby) ret = self.patch_or_create(self.sync_path, sync_state, version, False) if not isinstance(ret, bool): return SyncState.from_node(ret.metadata.resource_version, sync_state) def delete_sync_state(self, version: Optional[str] = None) -> bool: """Patch annotations of $SCOPE-sync Endpoint or ConfigMap with empty values. Effectively it removes "leader" and "sync_standby" annotations from the object. :param version: last known `resource_version` for conditional update of the object :returns: `True` if "delete" was successful """ return self.write_sync_state(None, None, version=version) is not None def watch(self, leader_version: Optional[str], timeout: float) -> bool: if self.__do_not_watch: self.__do_not_watch = False return True # We want to give a bit more time to non-leader nodes to synchronize HA loops if leader_version: timeout += 0.5 try: return super(Kubernetes, self).watch(None, timeout) finally: self.event.clear() patroni-3.2.2/patroni/dcs/raft.py000066400000000000000000000453741455170150700167470ustar00rootroot00000000000000import json import logging import os import threading import time from collections import defaultdict from pysyncobj import SyncObj, SyncObjConf, replicated, FAIL_REASON from pysyncobj.dns_resolver import globalDnsResolver from pysyncobj.node import TCPNode from pysyncobj.transport import TCPTransport, CONNECTION_STATE from pysyncobj.utility import TcpUtility from typing import Any, Callable, Collection, Dict, List, Optional, Set, Union, TYPE_CHECKING from . 
import AbstractDCS, ClusterConfig, Cluster, Failover, Leader, Member, Status, SyncState, \ TimelineHistory, citus_group_re from ..exceptions import DCSError from ..utils import validate_directory if TYPE_CHECKING: # pragma: no cover from ..config import Config logger = logging.getLogger(__name__) class RaftError(DCSError): pass class _TCPTransport(TCPTransport): def __init__(self, syncObj: 'DynMemberSyncObj', selfNode: Optional[TCPNode], otherNodes: Collection[TCPNode]) -> None: super(_TCPTransport, self).__init__(syncObj, selfNode, otherNodes) self.setOnUtilityMessageCallback('members', syncObj.getMembers) def _connectIfNecessarySingle(self, node: TCPNode) -> bool: try: return super(_TCPTransport, self)._connectIfNecessarySingle(node) except Exception as e: logger.debug('Connection to %s failed: %r', node, e) return False def resolve_host(self: TCPNode) -> Optional[str]: return globalDnsResolver().resolve(self.host) setattr(TCPNode, 'ip', property(resolve_host)) class SyncObjUtility(object): def __init__(self, otherNodes: Collection[Union[str, TCPNode]], conf: SyncObjConf, retry_timeout: int = 10) -> None: self._nodes = otherNodes self._utility = TcpUtility(conf.password, retry_timeout / max(1, len(otherNodes))) self.__node = next(iter(otherNodes), None) def executeCommand(self, command: List[Any]) -> Any: try: if self.__node: return self._utility.executeCommand(self.__node, command) except Exception: return None def getMembers(self) -> Optional[List[str]]: for self.__node in self._nodes: response = self.executeCommand(['members']) if response: return [member['addr'] for member in response] class DynMemberSyncObj(SyncObj): def __init__(self, selfAddress: Optional[str], partnerAddrs: Collection[str], conf: SyncObjConf, retry_timeout: int = 10) -> None: self.__early_apply_local_log = selfAddress is not None self.applied_local_log = False utility = SyncObjUtility(partnerAddrs, conf, retry_timeout) members = utility.getMembers() add_self = members and selfAddress not 
in members partnerAddrs = [member for member in (members or partnerAddrs) if member != selfAddress] super(DynMemberSyncObj, self).__init__(selfAddress, partnerAddrs, conf, transportClass=_TCPTransport) if add_self: thread = threading.Thread(target=utility.executeCommand, args=(['add', selfAddress],)) thread.daemon = True thread.start() def getMembers(self, args: Any, callback: Callable[[Any, Any], Any]) -> None: callback([{'addr': node.id, 'leader': node == self._getLeader(), 'status': CONNECTION_STATE.CONNECTED if self.isNodeConnected(node) else CONNECTION_STATE.DISCONNECTED} for node in self.otherNodes] + [{'addr': self.selfNode.id, 'leader': self._isLeader(), 'status': CONNECTION_STATE.CONNECTED}], None) def _onTick(self, timeToWait: float = 0.0): super(DynMemberSyncObj, self)._onTick(timeToWait) # The SyncObj calls onReady callback only when cluster got the leader and is ready for writes. # In some cases for us it is safe to "signal" the Raft object when the local log is fully applied. # We are using the `applied_local_log` property for that, but not calling the callback function. 
if self.__early_apply_local_log and not self.applied_local_log and self.raftLastApplied == self.raftCommitIndex: self.applied_local_log = True class KVStoreTTL(DynMemberSyncObj): def __init__(self, on_ready: Optional[Callable[..., Any]], on_set: Optional[Callable[[str, Dict[str, Any]], None]], on_delete: Optional[Callable[[str], None]], **config: Any) -> None: self.__thread = None self.__on_set = on_set self.__on_delete = on_delete self.__limb: Dict[str, Dict[str, Any]] = {} self.set_retry_timeout(int(config.get('retry_timeout') or 10)) self_addr = config.get('self_addr') partner_addrs: Set[str] = set(config.get('partner_addrs', [])) if config.get('patronictl'): if self_addr: partner_addrs.add(self_addr) self_addr = None # Create raft data_dir if necessary raft_data_dir = config.get('data_dir', '') if raft_data_dir != '': validate_directory(raft_data_dir) file_template = (self_addr or '') file_template = file_template.replace(':', '_') if os.name == 'nt' else file_template file_template = os.path.join(raft_data_dir, file_template) conf = SyncObjConf(password=config.get('password'), autoTick=False, appendEntriesUseBatch=False, bindAddress=config.get('bind_addr'), dnsFailCacheTime=(config.get('loop_wait') or 10), dnsCacheTime=(config.get('ttl') or 30), commandsWaitLeader=config.get('commandsWaitLeader'), fullDumpFile=(file_template + '.dump' if self_addr else None), journalFile=(file_template + '.journal' if self_addr else None), onReady=on_ready, dynamicMembershipChange=True) super(KVStoreTTL, self).__init__(self_addr, partner_addrs, conf, self.__retry_timeout) self.__data: Dict[str, Dict[str, Any]] = {} @staticmethod def __check_requirements(old_value: Dict[str, Any], **kwargs: Any) -> bool: return bool(('prevExist' not in kwargs or bool(kwargs['prevExist']) == bool(old_value)) and ('prevValue' not in kwargs or old_value and old_value['value'] == kwargs['prevValue']) and (kwargs.get('prevIndex') is None or old_value and old_value['index'] == kwargs['prevIndex'])) 
def set_retry_timeout(self, retry_timeout: int) -> None: self.__retry_timeout = retry_timeout def retry(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any: event = threading.Event() ret = {'result': None, 'error': -1} def callback(result: Any, error: Any) -> None: ret.update(result=result, error=error) event.set() kwargs['callback'] = callback timeout = kwargs.pop('timeout', None) or self.__retry_timeout deadline = timeout and time.time() + timeout while True: event.clear() func(*args, **kwargs) event.wait(timeout) if ret['error'] == FAIL_REASON.SUCCESS: return ret['result'] elif ret['error'] == FAIL_REASON.REQUEST_DENIED: break elif deadline: timeout = deadline - time.time() if timeout <= 0: raise RaftError('timeout') time.sleep(1) return False @replicated def _set(self, key: str, value: Dict[str, Any], **kwargs: Any) -> Union[bool, Dict[str, Any]]: old_value = self.__data.get(key, {}) if not self.__check_requirements(old_value, **kwargs): return False if old_value and old_value['created'] != value['created']: value['created'] = value['updated'] value['index'] = self.raftLastApplied + 1 self.__data[key] = value if self.__on_set: self.__on_set(key, value) return value def set(self, key: str, value: str, ttl: Optional[int] = None, handle_raft_error: bool = True, **kwargs: Any) -> Union[bool, Dict[str, Any]]: old_value = self.__data.get(key, {}) if not self.__check_requirements(old_value, **kwargs): return False data: Dict[str, Any] = {'value': value, 'updated': time.time()} data['created'] = old_value.get('created', data['updated']) if ttl: data['expire'] = data['updated'] + ttl try: return self.retry(self._set, key, data, **kwargs) except RaftError: if not handle_raft_error: raise return False def __pop(self, key: str) -> None: self.__data.pop(key) if self.__on_delete: self.__on_delete(key) @replicated def _delete(self, key: str, recursive: bool = False, **kwargs: Any) -> bool: if recursive: for k in list(self.__data.keys()): if k.startswith(key): 
self.__pop(k) elif not self.__check_requirements(self.__data.get(key, {}), **kwargs): return False else: self.__pop(key) return True def delete(self, key: str, recursive: bool = False, **kwargs: Any) -> bool: if not recursive and not self.__check_requirements(self.__data.get(key, {}), **kwargs): return False try: return self.retry(self._delete, key, recursive=recursive, **kwargs) except RaftError: return False @staticmethod def __values_match(old: Dict[str, Any], new: Dict[str, Any]) -> bool: return all(old.get(n) == new.get(n) for n in ('created', 'updated', 'expire', 'value')) @replicated def _expire(self, key: str, value: Dict[str, Any], callback: Optional[Callable[..., Any]] = None) -> None: current = self.__data.get(key) if current and self.__values_match(current, value): self.__pop(key) def __expire_keys(self) -> None: for key, value in self.__data.items(): if value and 'expire' in value and value['expire'] <= time.time() and \ not (key in self.__limb and self.__values_match(self.__limb[key], value)): self.__limb[key] = value def callback(*args: Any) -> None: if key in self.__limb and self.__values_match(self.__limb[key], value): self.__limb.pop(key) self._expire(key, value, callback=callback) def get(self, key: str, recursive: bool = False) -> Union[None, Dict[str, Any], Dict[str, Dict[str, Any]]]: if not recursive: return self.__data.get(key) return {k: v for k, v in self.__data.items() if k.startswith(key)} def _onTick(self, timeToWait: float = 0.0) -> None: super(KVStoreTTL, self)._onTick(timeToWait) if self._isLeader(): self.__expire_keys() else: self.__limb.clear() def _autoTickThread(self) -> None: self.__destroying = False while not self.__destroying: self.doTick(self.conf.autoTickPeriod) def startAutoTick(self) -> None: self.__thread = threading.Thread(target=self._autoTickThread) self.__thread.daemon = True self.__thread.start() def destroy(self) -> None: if self.__thread: self.__destroying = True self.__thread.join() super(KVStoreTTL, 
self).destroy() class Raft(AbstractDCS): def __init__(self, config: Dict[str, Any]) -> None: super(Raft, self).__init__(config) self._ttl = int(config.get('ttl') or 30) ready_event = threading.Event() self._sync_obj = KVStoreTTL(ready_event.set, self._on_set, self._on_delete, commandsWaitLeader=False, **config) self._sync_obj.startAutoTick() while True: ready_event.wait(5) if ready_event.is_set() or self._sync_obj.applied_local_log: break else: logger.info('waiting on raft') def _on_set(self, key: str, value: Dict[str, Any]) -> None: leader = (self._sync_obj.get(self.leader_path) or {}).get('value') if key == value['created'] == value['updated'] and \ (key.startswith(self.members_path) or key == self.leader_path and leader != self._name) or \ key in (self.leader_optime_path, self.status_path) and leader != self._name or \ key in (self.config_path, self.sync_path): self.event.set() def _on_delete(self, key: str) -> None: if key == self.leader_path: self.event.set() def set_ttl(self, ttl: int) -> Optional[bool]: self._ttl = ttl @property def ttl(self) -> int: return self._ttl def set_retry_timeout(self, retry_timeout: int) -> None: self._sync_obj.set_retry_timeout(retry_timeout) def reload_config(self, config: Union['Config', Dict[str, Any]]) -> None: super(Raft, self).reload_config(config) globalDnsResolver().setTimeouts(self.ttl, self.loop_wait) @staticmethod def member(key: str, value: Dict[str, Any]) -> Member: return Member.from_node(value['index'], os.path.basename(key), None, value['value']) def _cluster_from_nodes(self, nodes: Dict[str, Any]) -> Cluster: # get initialize flag initialize = nodes.get(self._INITIALIZE) initialize = initialize and initialize['value'] # get global dynamic configuration config = nodes.get(self._CONFIG) config = config and ClusterConfig.from_node(config['index'], config['value']) # get timeline history history = nodes.get(self._HISTORY) history = history and TimelineHistory.from_node(history['index'], history['value']) # get last 
know leader lsn and slots status = nodes.get(self._STATUS) or nodes.get(self._LEADER_OPTIME) status = Status.from_node(status and status['value']) # get list of members members = [self.member(k, n) for k, n in nodes.items() if k.startswith(self._MEMBERS) and k.count('/') == 1] # get leader leader = nodes.get(self._LEADER) if leader: member = Member(-1, leader['value'], None, {}) member = ([m for m in members if m.name == leader['value']] or [member])[0] leader = Leader(leader['index'], None, member) # failover key failover = nodes.get(self._FAILOVER) if failover: failover = Failover.from_node(failover['index'], failover['value']) # get synchronization state sync = nodes.get(self._SYNC) sync = SyncState.from_node(sync and sync['index'], sync and sync['value']) # get failsafe topology failsafe = nodes.get(self._FAILSAFE) try: failsafe = json.loads(failsafe['value']) if failsafe else None except Exception: failsafe = None return Cluster(initialize, config, leader, status, members, failover, sync, history, failsafe) def _cluster_loader(self, path: str) -> Cluster: response = self._sync_obj.get(path, recursive=True) if not response: return Cluster.empty() nodes = {key[len(path):]: value for key, value in response.items()} return self._cluster_from_nodes(nodes) def _citus_cluster_loader(self, path: str) -> Dict[int, Cluster]: clusters: Dict[int, Dict[str, Any]] = defaultdict(dict) response = self._sync_obj.get(path, recursive=True) for key, value in (response or {}).items(): key = key[len(path):].split('/', 1) if len(key) == 2 and citus_group_re.match(key[0]): clusters[int(key[0])][key[1]] = value return {group: self._cluster_from_nodes(nodes) for group, nodes in clusters.items()} def _load_cluster( self, path: str, loader: Callable[[str], Union[Cluster, Dict[int, Cluster]]] ) -> Union[Cluster, Dict[int, Cluster]]: return loader(path) def _write_leader_optime(self, last_lsn: str) -> bool: return self._sync_obj.set(self.leader_optime_path, last_lsn, timeout=1) is not 
False def _write_status(self, value: str) -> bool: return self._sync_obj.set(self.status_path, value, timeout=1) is not False def _write_failsafe(self, value: str) -> bool: return self._sync_obj.set(self.failsafe_path, value, timeout=1) is not False def _update_leader(self, leader: Leader) -> bool: ret = self._sync_obj.set(self.leader_path, self._name, ttl=self._ttl, handle_raft_error=False, prevValue=self._name) is not False if not ret and self._sync_obj.get(self.leader_path) is None: ret = self.attempt_to_acquire_leader() return ret def attempt_to_acquire_leader(self) -> bool: return self._sync_obj.set(self.leader_path, self._name, ttl=self._ttl, handle_raft_error=False, prevExist=False) is not False def set_failover_value(self, value: str, version: Optional[int] = None) -> bool: return self._sync_obj.set(self.failover_path, value, prevIndex=version) is not False def set_config_value(self, value: str, version: Optional[int] = None) -> bool: return self._sync_obj.set(self.config_path, value, prevIndex=version) is not False def touch_member(self, data: Dict[str, Any]) -> bool: value = json.dumps(data, separators=(',', ':')) return self._sync_obj.set(self.member_path, value, self._ttl, timeout=2) is not False def take_leader(self) -> bool: return self._sync_obj.set(self.leader_path, self._name, ttl=self._ttl) is not False def initialize(self, create_new: bool = True, sysid: str = '') -> bool: return self._sync_obj.set(self.initialize_path, sysid, prevExist=(not create_new)) is not False def _delete_leader(self, leader: Leader) -> bool: return self._sync_obj.delete(self.leader_path, prevValue=self._name, timeout=1) def cancel_initialization(self) -> bool: return self._sync_obj.delete(self.initialize_path) def delete_cluster(self) -> bool: return self._sync_obj.delete(self.client_path(''), recursive=True) def set_history_value(self, value: str) -> bool: return self._sync_obj.set(self.history_path, value) is not False def set_sync_state_value(self, value: str, version: 
Optional[int] = None) -> Union[int, bool]: ret = self._sync_obj.set(self.sync_path, value, prevIndex=version) if isinstance(ret, dict): return ret['index'] return ret def delete_sync_state(self, version: Optional[int] = None) -> bool: return self._sync_obj.delete(self.sync_path, prevIndex=version) def watch(self, leader_version: Optional[int], timeout: float) -> bool: try: return super(Raft, self).watch(leader_version, timeout) finally: self.event.clear() patroni-3.2.2/patroni/dcs/zookeeper.py000066400000000000000000000475421455170150700200150ustar00rootroot00000000000000import json import logging import select import socket import time from kazoo.client import KazooClient, KazooState, KazooRetry from kazoo.exceptions import ConnectionClosedError, NoNodeError, NodeExistsError, SessionExpiredError from kazoo.handlers.threading import AsyncResult, SequentialThreadingHandler from kazoo.protocol.states import KeeperState, WatchedEvent, ZnodeStat from kazoo.retry import RetryFailedError from kazoo.security import ACL, make_acl from typing import Any, Callable, Dict, List, Optional, Union, Tuple, TYPE_CHECKING from . 
class PatroniSequentialThreadingHandler(SequentialThreadingHandler):
    """Kazoo threading handler with an aggressive, short connect timeout.

    "Fail earlier and retry more often" works better than a single long connect
    attempt, so the connect timeout is capped at half of ``loop_wait``.
    """

    def __init__(self, connect_timeout: Union[int, float]) -> None:
        super(PatroniSequentialThreadingHandler, self).__init__()
        self.set_connect_timeout(connect_timeout)

    def set_connect_timeout(self, connect_timeout: Union[int, float]) -> None:
        # try to connect to zookeeper node during loop_wait/2
        self._connect_timeout = max(1.0, connect_timeout / 2.0)

    def create_connection(self, *args: Any, **kwargs: Any) -> socket.socket:
        """Establish a connection with one of the ZooKeeper nodes.

        Overridden to substitute our (shorter) connect timeout for the negotiated
        session timeout, regardless of how kazoo invoked us: since kazoo 2.6.0 the
        address and timeout may arrive via keyword arguments instead of positionals.

        :param args: usually ``tuple(host, port)`` first, optionally the negotiated
            session timeout second.
        """
        params: List[Any] = list(args)
        if not params:
            # kazoo >= 2.6.0 may call create_connection with keyword arguments only
            negotiated = kwargs.get('timeout', self._connect_timeout * 10)
            kwargs['timeout'] = max(self._connect_timeout, negotiated / 10.0)
        elif len(params) == 1:
            params.append(self._connect_timeout)
        else:
            params[1] = max(self._connect_timeout, params[1] / 10.0)
        return super(PatroniSequentialThreadingHandler, self).create_connection(*params, **kwargs)

    def select(self, *args: Any, **kwargs: Any) -> Any:
        """Map exceptions raised for an invalid (-1) socket onto ``select.error``.

        Python 3 may raise ``ValueError`` (fd == -1) or, starting with kazoo 2.9,
        ``TypeError`` ("Invalid file descriptor: -1") from select/poll; callers
        expect a ``socket.error`` instead.
        """
        try:
            return super(PatroniSequentialThreadingHandler, self).select(*args, **kwargs)
        except (TypeError, ValueError) as e:
            raise select.error(9, str(e))
if self._state == KeeperState.CONNECTING: async_object.set_exception(SessionExpiredError()) return False return super(PatroniKazooClient, self)._call(request, async_object) class ZooKeeper(AbstractDCS): def __init__(self, config: Dict[str, Any]) -> None: super(ZooKeeper, self).__init__(config) hosts: Union[str, List[str]] = config.get('hosts', []) if isinstance(hosts, list): hosts = ','.join(hosts) mapping = {'use_ssl': 'use_ssl', 'verify': 'verify_certs', 'cacert': 'ca', 'cert': 'certfile', 'key': 'keyfile', 'key_password': 'keyfile_password'} kwargs = {v: config[k] for k, v in mapping.items() if k in config} if 'set_acls' in config: default_acl: List[ACL] = [] for principal, permissions in config['set_acls'].items(): normalizedPermissions = [p.upper() for p in permissions] default_acl.append(make_acl(scheme='x509', credential=principal, read='READ' in normalizedPermissions, write='WRITE' in normalizedPermissions, create='CREATE' in normalizedPermissions, delete='DELETE' in normalizedPermissions, admin='ADMIN' in normalizedPermissions, all='ALL' in normalizedPermissions)) kwargs['default_acl'] = default_acl self._client = PatroniKazooClient(hosts, handler=PatroniSequentialThreadingHandler(config['retry_timeout']), timeout=config['ttl'], connection_retry=KazooRetry(max_delay=1, max_tries=-1, sleep_func=time.sleep), command_retry=KazooRetry(max_delay=1, max_tries=-1, deadline=config['retry_timeout'], sleep_func=time.sleep), **kwargs) self.__last_member_data: Optional[Dict[str, Any]] = None self._orig_kazoo_connect = self._client._connection._connect self._client._connection._connect = self._kazoo_connect self._client.start() def _kazoo_connect(self, *args: Any) -> Tuple[Union[int, float], Union[int, float]]: """Kazoo is using Ping's to determine health of connection to zookeeper. If there is no response on Ping after Ping interval (1/2 from read_timeout) it will consider current connection dead and try to connect to another node. 
Without this "magic" it was taking up to 2/3 from session timeout (ttl) to figure out that connection was dead and we had only small time for reconnect and retry. This method is needed to return different value of read_timeout, which is not calculated from negotiated session timeout but from value of `loop_wait`. And it is 2 sec smaller than loop_wait, because we can spend up to 2 seconds when calling `touch_member()` and `write_leader_optime()` methods, which also may hang...""" ret = self._orig_kazoo_connect(*args) return max(self.loop_wait - 2, 2) * 1000, ret[1] def _watcher(self, event: WatchedEvent) -> None: if event.state != KazooState.CONNECTED or event.path.startswith(self.client_path('')): self.event.set() def reload_config(self, config: Union['Config', Dict[str, Any]]) -> None: self.set_retry_timeout(config['retry_timeout']) loop_wait = config['loop_wait'] loop_wait_changed = self._loop_wait != loop_wait self._loop_wait = loop_wait if isinstance(self._client.handler, PatroniSequentialThreadingHandler): self._client.handler.set_connect_timeout(loop_wait) # We need to reestablish connection to zookeeper if we want to change # read_timeout (and Ping interval respectively), because read_timeout # is calculated in `_kazoo_connect` method. If we are changing ttl at # the same time, set_ttl method will reestablish connection and return # `!True`, otherwise we will close existing connection and let kazoo # open the new one. if not self.set_ttl(config['ttl']) and loop_wait_changed: self._client._connection._socket.close() def set_ttl(self, ttl: int) -> Optional[bool]: """It is not possible to change ttl (session_timeout) in zookeeper without destroying old session and creating the new one. 
This method returns `!True` if session_timeout has been changed (`restart()` has been called).""" ttl = int(ttl * 1000) if self._client._session_timeout != ttl: self._client._session_timeout = ttl self._client.restart() return True @property def ttl(self) -> int: return int(self._client._session_timeout / 1000.0) def set_retry_timeout(self, retry_timeout: int) -> None: retry = self._client.retry if isinstance(self._client.retry, KazooRetry) else self._client._retry retry.deadline = retry_timeout def get_node( self, key: str, watch: Optional[Callable[[WatchedEvent], None]] = None ) -> Optional[Tuple[str, ZnodeStat]]: try: ret = self._client.get(key, watch) return (ret[0].decode('utf-8'), ret[1]) except NoNodeError: return None def get_status(self, path: str, leader: Optional[Leader]) -> Status: status = self.get_node(path + self._STATUS) if not status: status = self.get_node(path + self._LEADER_OPTIME) return Status.from_node(status and status[0]) @staticmethod def member(name: str, value: str, znode: ZnodeStat) -> Member: return Member.from_node(znode.version, name, znode.ephemeralOwner, value) def get_children(self, key: str) -> List[str]: try: return self._client.get_children(key) except NoNodeError: return [] def load_members(self, path: str) -> List[Member]: members: List[Member] = [] for member in self.get_children(path + self._MEMBERS): data = self.get_node(path + self._MEMBERS + member) if data is not None: members.append(self.member(member, *data)) return members def _cluster_loader(self, path: str) -> Cluster: nodes = set(self.get_children(path)) # get initialize flag initialize = (self.get_node(path + self._INITIALIZE) or [None])[0] if self._INITIALIZE in nodes else None # get global dynamic configuration config = self.get_node(path + self._CONFIG, watch=self._watcher) if self._CONFIG in nodes else None config = config and ClusterConfig.from_node(config[1].version, config[0], config[1].mzxid) # get timeline history history = self.get_node(path + 
self._HISTORY) if self._HISTORY in nodes else None history = history and TimelineHistory.from_node(history[1].mzxid, history[0]) # get synchronization state sync = self.get_node(path + self._SYNC) if self._SYNC in nodes else None sync = SyncState.from_node(sync and sync[1].version, sync and sync[0]) # get list of members members = self.load_members(path) if self._MEMBERS[:-1] in nodes else [] # get leader leader = self.get_node(path + self._LEADER, watch=self._watcher) if self._LEADER in nodes else None if leader: member = Member(-1, leader[0], None, {}) member = ([m for m in members if m.name == leader[0]] or [member])[0] leader = Leader(leader[1].version, leader[1].ephemeralOwner, member) # get last known leader lsn and slots status = self.get_status(path, leader) # failover key failover = self.get_node(path + self._FAILOVER) if self._FAILOVER in nodes else None failover = failover and Failover.from_node(failover[1].version, failover[0]) # get failsafe topology failsafe = self.get_node(path + self._FAILSAFE) if self._FAILSAFE in nodes else None try: failsafe = json.loads(failsafe[0]) if failsafe else None except Exception: failsafe = None return Cluster(initialize, config, leader, status, members, failover, sync, history, failsafe) def _citus_cluster_loader(self, path: str) -> Dict[int, Cluster]: ret: Dict[int, Cluster] = {} for node in self.get_children(path): if citus_group_re.match(node): ret[int(node)] = self._cluster_loader(path + node + '/') return ret def _load_cluster( self, path: str, loader: Callable[[str], Union[Cluster, Dict[int, Cluster]]] ) -> Union[Cluster, Dict[int, Cluster]]: try: return self._client.retry(loader, path) except Exception: logger.exception('get_cluster') raise ZooKeeperError('ZooKeeper in not responding properly') def _create(self, path: str, value: bytes, retry: bool = False, ephemeral: bool = False) -> bool: try: if retry: self._client.retry(self._client.create, path, value, makepath=True, ephemeral=ephemeral) else: 
self._client.create_async(path, value, makepath=True, ephemeral=ephemeral).get(timeout=1) return True except Exception: logger.exception('Failed to create %s', path) return False def attempt_to_acquire_leader(self) -> bool: try: self._client.retry(self._client.create, self.leader_path, self._name.encode('utf-8'), makepath=True, ephemeral=True) return True except (ConnectionClosedError, RetryFailedError) as e: raise ZooKeeperError(e) except Exception as e: if not isinstance(e, NodeExistsError): logger.error('Failed to create %s: %r', self.leader_path, e) logger.info('Could not take out TTL lock') return False def _set_or_create(self, key: str, value: str, version: Optional[int] = None, retry: bool = False, do_not_create_empty: bool = False) -> Union[int, bool]: value_bytes = value.encode('utf-8') try: if retry: ret = self._client.retry(self._client.set, key, value_bytes, version=version or -1) else: ret = self._client.set_async(key, value_bytes, version=version or -1).get(timeout=1) return ret.version except NoNodeError: if do_not_create_empty and not value_bytes: return True elif version is None: if self._create(key, value_bytes, retry): return 0 else: return False except Exception: logger.exception('Failed to update %s', key) return False def set_failover_value(self, value: str, version: Optional[int] = None) -> bool: return self._set_or_create(self.failover_path, value, version) is not False def set_config_value(self, value: str, version: Optional[int] = None) -> bool: return self._set_or_create(self.config_path, value, version, retry=True) is not False def initialize(self, create_new: bool = True, sysid: str = "") -> bool: sysid_bytes = sysid.encode('utf-8') return self._create(self.initialize_path, sysid_bytes, retry=True) if create_new \ else self._client.retry(self._client.set, self.initialize_path, sysid_bytes) def touch_member(self, data: Dict[str, Any]) -> bool: cluster = self.cluster member = cluster and cluster.get_member(self._name, 
fallback_to_leader=False) member_data = self.__last_member_data or member and member.data if member and member_data: # We want delete the member ZNode if our session doesn't match with session id on our member key if self._client.client_id is not None and member.session != self._client.client_id[0]: logger.warning('Recreating the member ZNode due to ownership mismatch') try: self._client.delete_async(self.member_path).get(timeout=1) except NoNodeError: pass except Exception: return False member = None encoded_data = json.dumps(data, separators=(',', ':')).encode('utf-8') if member and member_data: if deep_compare(data, member_data): return True else: try: self._client.create_async(self.member_path, encoded_data, makepath=True, ephemeral=True).get(timeout=1) self.__last_member_data = data return True except Exception as e: if not isinstance(e, NodeExistsError): logger.exception('touch_member') return False try: self._client.set_async(self.member_path, encoded_data).get(timeout=1) self.__last_member_data = data return True except Exception: logger.exception('touch_member') return False def take_leader(self) -> bool: return self.attempt_to_acquire_leader() def _write_leader_optime(self, last_lsn: str) -> bool: return self._set_or_create(self.leader_optime_path, last_lsn) is not False def _write_status(self, value: str) -> bool: return self._set_or_create(self.status_path, value) is not False def _write_failsafe(self, value: str) -> bool: return self._set_or_create(self.failsafe_path, value) is not False def _update_leader(self, leader: Leader) -> bool: if self._client.client_id and self._client.client_id[0] != leader.session: logger.warning('Recreating the leader ZNode due to ownership mismatch') try: self._client.retry(self._client.delete, self.leader_path) except NoNodeError: pass except (ConnectionClosedError, RetryFailedError) as e: raise ZooKeeperError(e) except Exception as e: logger.error('Failed to remove %s: %r', self.leader_path, e) return False try: 
self._client.retry(self._client.create, self.leader_path, self._name.encode('utf-8'), makepath=True, ephemeral=True) except (ConnectionClosedError, RetryFailedError) as e: raise ZooKeeperError(e) except Exception as e: logger.error('Failed to create %s: %r', self.leader_path, e) return False return True def _delete_leader(self, leader: Leader) -> bool: self._client.restart() return True def _cancel_initialization(self) -> None: node = self.get_node(self.initialize_path) if node: self._client.delete(self.initialize_path, version=node[1].version) def cancel_initialization(self) -> bool: try: self._client.retry(self._cancel_initialization) return True except Exception: logger.exception("Unable to delete initialize key") return False def delete_cluster(self) -> bool: try: return self._client.retry(self._client.delete, self.client_path(''), recursive=True) except NoNodeError: return True def set_history_value(self, value: str) -> bool: return self._set_or_create(self.history_path, value) is not False def set_sync_state_value(self, value: str, version: Optional[int] = None) -> Union[int, bool]: return self._set_or_create(self.sync_path, value, version, retry=True, do_not_create_empty=True) def delete_sync_state(self, version: Optional[int] = None) -> bool: return self.set_sync_state_value("{}", version) is not False def watch(self, leader_version: Optional[int], timeout: float) -> bool: if leader_version: timeout += 0.5 try: return super(ZooKeeper, self).watch(leader_version, timeout) finally: self.event.clear() patroni-3.2.2/patroni/exceptions.py000066400000000000000000000024341455170150700174110ustar00rootroot00000000000000"""Implement high-level Patroni exceptions. More specific exceptions can be found in other modules, as subclasses of any exception defined in this module. """ from typing import Any class PatroniException(Exception): """Parent class for all kind of Patroni exceptions. :ivar value: description of the exception. 
""" def __init__(self, value: Any) -> None: """Create a new instance of :class:`PatroniException` with the given description. :param value: description of the exception. """ self.value = value class PatroniFatalException(PatroniException): """Catastrophic exception that prevents Patroni from performing its job.""" pass class PostgresException(PatroniException): """Any exception related with Postgres management.""" pass class DCSError(PatroniException): """Parent class for all kind of DCS related exceptions.""" pass class PostgresConnectionException(PostgresException): """Any problem faced while connecting to a Postgres instance.""" pass class WatchdogError(PatroniException): """Any problem faced while managing a watchdog device.""" pass class ConfigParseError(PatroniException): """Any issue identified while loading or validating the Patroni configuration.""" pass patroni-3.2.2/patroni/file_perm.py000066400000000000000000000067551455170150700172040ustar00rootroot00000000000000"""Helper object that helps with figuring out file and directory permissions based on permissions of PGDATA. :var logger: logger of this module. :var pg_perm: instance of the :class:`__FilePermissions` object. """ import logging import os import stat logger = logging.getLogger(__name__) class __FilePermissions: """Helper class for managing permissions of directories and files under PGDATA. Execute :meth:`set_permissions_from_data_directory` to figure out which permissions should be used for files and directories under PGDATA based on permissions of PGDATA root directory. """ # Mode mask for data directory permissions that only allows the owner to # read/write directories and files -- mask 077. __PG_MODE_MASK_OWNER = stat.S_IRWXG | stat.S_IRWXO # Mode mask for data directory permissions that also allows group read/execute -- mask 027. __PG_MODE_MASK_GROUP = stat.S_IWGRP | stat.S_IRWXO # Default mode for creating directories -- mode 700. 
__PG_DIR_MODE_OWNER = stat.S_IRWXU # Mode for creating directories that allows group read/execute -- mode 750. __PG_DIR_MODE_GROUP = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP # Default mode for creating files -- mode 600. __PG_FILE_MODE_OWNER = stat.S_IRUSR | stat.S_IWUSR # Mode for creating files that allows group read -- mode 640. __PG_FILE_MODE_GROUP = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP def __init__(self) -> None: """Create a :class:`__FilePermissions` object and set default permissions.""" self.__set_owner_permissions() self.__set_umask() def __set_umask(self) -> None: """Set umask value based on calculations. .. note:: Should only be called once either :meth:`__set_owner_permissions` or :meth:`__set_group_permissions` has been executed. """ try: os.umask(self.__pg_mode_mask) except Exception as e: logger.error('Can not set umask to %03o: %r', self.__pg_mode_mask, e) def __set_owner_permissions(self) -> None: """Make directories/files accessible only by the owner.""" self.__pg_dir_create_mode = self.__PG_DIR_MODE_OWNER self.__pg_file_create_mode = self.__PG_FILE_MODE_OWNER self.__pg_mode_mask = self.__PG_MODE_MASK_OWNER def __set_group_permissions(self) -> None: """Make directories/files accessible by the owner and readable by group.""" self.__pg_dir_create_mode = self.__PG_DIR_MODE_GROUP self.__pg_file_create_mode = self.__PG_FILE_MODE_GROUP self.__pg_mode_mask = self.__PG_MODE_MASK_GROUP def set_permissions_from_data_directory(self, data_dir: str) -> None: """Set new permissions based on provided *data_dir*. :param data_dir: reference to PGDATA to calculate permissions from. 
""" try: st = os.stat(data_dir) if (st.st_mode & self.__PG_DIR_MODE_GROUP) == self.__PG_DIR_MODE_GROUP: self.__set_group_permissions() else: self.__set_owner_permissions() except Exception as e: logger.error('Can not check permissions on %s: %r', data_dir, e) else: self.__set_umask() @property def dir_create_mode(self) -> int: """Directory permissions.""" return self.__pg_dir_create_mode @property def file_create_mode(self) -> int: """File permissions.""" return self.__pg_file_create_mode pg_perm = __FilePermissions() patroni-3.2.2/patroni/ha.py000066400000000000000000003313271455170150700156260ustar00rootroot00000000000000import datetime import functools import json import logging import sys import time import uuid from multiprocessing.pool import ThreadPool from threading import RLock from typing import Any, Callable, Collection, Dict, List, NamedTuple, Optional, Union, Tuple, TYPE_CHECKING from . import psycopg from .__main__ import Patroni from .async_executor import AsyncExecutor, CriticalTask from .collections import CaseInsensitiveSet from .dcs import AbstractDCS, Cluster, Leader, Member, RemoteMember, Status, slot_name_from_member_name from .exceptions import DCSError, PostgresConnectionException, PatroniFatalException from .postgresql.callback_executor import CallbackAction from .postgresql.misc import postgres_version_to_int from .postgresql.postmaster import PostmasterProcess from .postgresql.rewind import Rewind from .tags import Tags from .utils import polling_loop, tzutc logger = logging.getLogger(__name__) class _MemberStatus(Tags, NamedTuple('_MemberStatus', [('member', Member), ('reachable', bool), ('in_recovery', Optional[bool]), ('wal_position', int), ('data', Dict[str, Any])])): """Node status distilled from API response. Consists of the following fields: :ivar member: :class:`~patroni.dcs.Member` object of the node. :ivar reachable: ``False`` if the node is not reachable or is not responding with correct JSON. 
:ivar in_recovery: ``False`` if the node is running as a primary (`if pg_is_in_recovery() == true`). :ivar wal_position: maximum value of ``replayed_location`` or ``received_location`` from JSON. :ivar data: the whole JSON response for future usage. """ @classmethod def from_api_response(cls, member: Member, json: Dict[str, Any]) -> '_MemberStatus': """ :param member: dcs.Member object :param json: RestApiHandler.get_postgresql_status() result :returns: _MemberStatus object """ # If one of those is not in a response we want to count the node as not healthy/reachable wal: Dict[str, Any] = json.get('wal') or json['xlog'] # abuse difference in primary/replica response format in_recovery = not (bool(wal.get('location')) or json.get('role') in ('master', 'primary')) lsn = int(in_recovery and max(wal.get('received_location', 0), wal.get('replayed_location', 0))) return cls(member, True, in_recovery, lsn, json) @property def tags(self) -> Dict[str, Any]: """Dictionary with values of different tags (i.e. 
nofailover).""" return self.data.get('tags', {}) @property def timeline(self) -> int: """Timeline value from JSON.""" return self.data.get('timeline', 0) @property def watchdog_failed(self) -> bool: """Indicates that watchdog is required by configuration but not available or failed.""" return self.data.get('watchdog_failed', False) @classmethod def unknown(cls, member: Member) -> '_MemberStatus': """Create a new class instance with empty or null values.""" return cls(member, False, None, 0, {}) def failover_limitation(self) -> Optional[str]: """Returns reason why this node can't promote or None if everything is ok.""" if not self.reachable: return 'not reachable' if self.nofailover: return 'not allowed to promote' if self.watchdog_failed: return 'not watchdog capable' return None class Failsafe(object): def __init__(self, dcs: AbstractDCS) -> None: self._lock = RLock() self._dcs = dcs self._reset_state() def update(self, data: Dict[str, Any]) -> None: with self._lock: self._last_update = time.time() self._name = data['name'] self._conn_url = data['conn_url'] self._api_url = data['api_url'] self._slots = data.get('slots') def _reset_state(self) -> None: self._last_update = 0 self._name = None self._conn_url = None self._api_url = None self._slots = None @property def leader(self) -> Optional[Leader]: with self._lock: if self._last_update + self._dcs.ttl > time.time() and self._name: return Leader('', '', RemoteMember(self._name, {'api_url': self._api_url, 'conn_url': self._conn_url, 'slots': self._slots})) def update_cluster(self, cluster: Cluster) -> Cluster: # Enreach cluster with the real leader if there was a ping from it leader = self.leader if leader: # We rely on the strict order of fields in the namedtuple status = Status(cluster.status.last_lsn, leader.member.data['slots']) cluster = Cluster(*cluster[0:2], leader, status, *cluster[4:]) return cluster def is_active(self) -> bool: """Is used to report in REST API whether the failsafe mode was activated. 
On primary the self._last_update is set from the set_is_active() method and always returns the correct value. On replicas the self._last_update is set at the moment when the primary performs POST /failsafe REST API calls. The side-effect - it is possible that replicas will show failsafe_is_active values different from the primary.""" with self._lock: return self._last_update + self._dcs.ttl > time.time() def set_is_active(self, value: float) -> None: with self._lock: self._last_update = value if not value: self._reset_state() class Ha(object): def __init__(self, patroni: Patroni): self.patroni = patroni self.state_handler = patroni.postgresql self._rewind = Rewind(self.state_handler) self.dcs = patroni.dcs self.cluster = Cluster.empty() self.global_config = self.patroni.config.get_global_config(None) self.old_cluster = Cluster.empty() self._leader_expiry = 0 self._leader_expiry_lock = RLock() self._failsafe = Failsafe(patroni.dcs) self._was_paused = False self._leader_timeline = None self.recovering = False self._async_response = CriticalTask() self._crash_recovery_started = 0 self._start_timeout = None self._async_executor = AsyncExecutor(self.state_handler.cancellable, self.wakeup) self.watchdog = patroni.watchdog # Each member publishes various pieces of information to the DCS using touch_member. This lock protects # the state and publishing procedure to have consistent ordering and avoid publishing stale values. self._member_state_lock = RLock() # Count of concurrent sync disabling requests. Value above zero means that we don't want to be synchronous # standby. Changes protected by _member_state_lock. self._disable_sync = 0 # Remember the last known member role and state written to the DCS in order to notify Citus coordinator self._last_state = None # We need following property to avoid shutdown of postgres when join of Patroni to the postgres # already running as replica was aborted due to cluster not being initialized in DCS. 
self._join_aborted = False # used only in backoff after failing a pre_promote script self._released_leader_key_timestamp = 0 def primary_stop_timeout(self) -> Union[int, None]: """:returns: "primary_stop_timeout" from the global configuration or `None` when not in synchronous mode.""" ret = self.global_config.primary_stop_timeout return ret if ret > 0 and self.is_synchronous_mode() else None def is_paused(self) -> bool: """:returns: `True` if in maintenance mode.""" return self.global_config.is_paused def check_timeline(self) -> bool: """:returns: `True` if should check whether the timeline is latest during the leader race.""" return self.global_config.check_mode('check_timeline') def is_standby_cluster(self) -> bool: """:returns: `True` if global configuration has a valid "standby_cluster" section.""" return self.global_config.is_standby_cluster def is_leader(self) -> bool: """:returns: `True` if the current node is the leader, based on expiration set when it last held the key.""" with self._leader_expiry_lock: return self._leader_expiry > time.time() def set_is_leader(self, value: bool) -> None: """Update the current node's view of it's own leadership status. Will update the expiry timestamp to match the dcs ttl if setting leadership to true, otherwise will set the expiry to the past to immediately invalidate. :param value: is the current node the leader. """ with self._leader_expiry_lock: self._leader_expiry = time.time() + self.dcs.ttl if value else 0 def sync_mode_is_active(self) -> bool: """Check whether synchronous replication is requested and already active. :returns: ``True`` if the primary already put its name into the ``/sync`` in DCS. """ return self.is_synchronous_mode() and not self.cluster.sync.is_empty def _get_failover_action_name(self) -> str: """Return the currently requested manual failover action name or the default ``failover``. 
:returns: :class:`str` representing the manually requested action (``manual failover`` if no leader is specified in the ``/failover`` in DCS, ``switchover`` otherwise) or ``failover`` if ``/failover`` is empty. """ if not self.cluster.failover: return 'failover' return 'switchover' if self.cluster.failover.leader else 'manual failover' def load_cluster_from_dcs(self) -> None: cluster = self.dcs.get_cluster() # We want to keep the state of cluster when it was healthy if not cluster.is_unlocked() or not self.old_cluster: self.old_cluster = cluster self.cluster = cluster if self.cluster.is_unlocked() and self.is_failsafe_mode(): # If failsafe mode is enabled we want to inject the "real" leader to the cluster self.cluster = cluster = self._failsafe.update_cluster(cluster) if not self.has_lock(False): self.set_is_leader(False) self._leader_timeline = cluster.leader.timeline if cluster.leader else None def acquire_lock(self) -> bool: try: ret = self.dcs.attempt_to_acquire_leader() except DCSError: raise except Exception: logger.exception('Unexpected exception raised from attempt_to_acquire_leader, please report it as a BUG') ret = False self.set_is_leader(ret) return ret def _failsafe_config(self) -> Optional[Dict[str, str]]: if self.is_failsafe_mode(): ret = {m.name: m.api_url for m in self.cluster.members if m.api_url} if self.state_handler.name not in ret: ret[self.state_handler.name] = self.patroni.api.connection_string return ret def update_lock(self, update_status: bool = False) -> bool: """Update the leader lock in DCS. .. note:: After successful update of the leader key the :meth:`AbstractDCS.update_leader` method could also optionally update the ``/status`` and ``/failsafe`` keys. The ``/status`` key contains the last known LSN on the leader node and the last known state of permanent replication slots including permanent physical replication slot for the leader. 
Last, but not least, this method calls a :meth:`Watchdog.keepalive` method after the leader key was successfully updated. :param update_status: ``True`` if we also need to update the ``/status`` key in DCS, otherwise ``False``. :returns: ``True`` if the leader key was successfully updated and we can continue to run postgres as a ``primary`` or as a ``standby_leader``, otherwise ``False``. """ last_lsn = slots = None if update_status: try: last_lsn = self.state_handler.last_operation() slots = self.cluster.filter_permanent_slots( {**self.state_handler.slots(), slot_name_from_member_name(self.state_handler.name): last_lsn}, self.is_standby_cluster(), self.state_handler.major_version) except Exception: logger.exception('Exception when called state_handler.last_operation()') if TYPE_CHECKING: # pragma: no cover assert self.cluster.leader is not None try: ret = self.dcs.update_leader(self.cluster.leader, last_lsn, slots, self._failsafe_config()) except DCSError: raise except Exception: logger.exception('Unexpected exception raised from update_leader, please report it as a BUG') ret = False self.set_is_leader(ret) if ret: self.watchdog.keepalive() return ret def has_lock(self, info: bool = True) -> bool: lock_owner = self.cluster.leader and self.cluster.leader.name if info: logger.info('Lock owner: %s; I am %s', lock_owner, self.state_handler.name) return lock_owner == self.state_handler.name def get_effective_tags(self) -> Dict[str, Any]: """Return configuration tags merged with dynamically applied tags.""" tags = self.patroni.tags.copy() # _disable_sync could be modified concurrently, but we don't care as attribute get and set are atomic. 
if self._disable_sync > 0: tags['nosync'] = True return tags def notify_citus_coordinator(self, event: str) -> None: if self.state_handler.citus_handler.is_worker(): coordinator = self.dcs.get_citus_coordinator() if coordinator and coordinator.leader and coordinator.leader.conn_url: try: data = {'type': event, 'group': self.state_handler.citus_handler.group(), 'leader': self.state_handler.name, 'timeout': self.dcs.ttl, 'cooldown': self.patroni.config['retry_timeout']} timeout = self.dcs.ttl if event == 'before_demote' else 2 self.patroni.request(coordinator.leader.member, 'post', 'citus', data, timeout=timeout, retries=0) except Exception as e: logger.warning('Request to Citus coordinator leader %s %s failed: %r', coordinator.leader.name, coordinator.leader.member.api_url, e) def touch_member(self) -> bool: with self._member_state_lock: data: Dict[str, Any] = { 'conn_url': self.state_handler.connection_string, 'api_url': self.patroni.api.connection_string, 'state': self.state_handler.state, 'role': self.state_handler.role, 'version': self.patroni.version } proxy_url = self.state_handler.proxy_url if proxy_url: data['proxy_url'] = proxy_url if self.is_leader() and not self._rewind.checkpoint_after_promote(): data['checkpoint_after_promote'] = False tags = self.get_effective_tags() if tags: data['tags'] = tags if self.state_handler.pending_restart: data['pending_restart'] = True if self._async_executor.scheduled_action in (None, 'promote') \ and data['state'] in ['running', 'restarting', 'starting']: try: timeline, wal_position, pg_control_timeline = self.state_handler.timeline_wal_position() data['xlog_location'] = wal_position if not timeline: # running as a standby replication_state = self.state_handler.replication_state() if replication_state: data['replication_state'] = replication_state # try pg_stat_wal_receiver to get the timeline timeline = self.state_handler.received_timeline() if not timeline: # So far the only way to get the current timeline on the 
standby is from # the replication connection. In order to avoid opening the replication # connection on every iteration of HA loop we will do it only when noticed # that the timeline on the primary has changed. # Unfortunately such optimization isn't possible on the standby_leader, # therefore we will get the timeline from pg_control, either by calling # pg_control_checkpoint() on 9.6+ or by parsing the output of pg_controldata. if self.state_handler.role == 'standby_leader': timeline = pg_control_timeline or self.state_handler.pg_control_timeline() else: timeline = self.state_handler.replica_cached_timeline(self._leader_timeline) or 0 if timeline: data['timeline'] = timeline except Exception: pass if self.patroni.scheduled_restart: scheduled_restart_data = self.patroni.scheduled_restart.copy() scheduled_restart_data['schedule'] = scheduled_restart_data['schedule'].isoformat() data['scheduled_restart'] = scheduled_restart_data if self.is_paused(): data['pause'] = True ret = self.dcs.touch_member(data) if ret: new_state = (data['state'], {'master': 'primary'}.get(data['role'], data['role'])) if self._last_state != new_state and new_state == ('running', 'primary'): self.notify_citus_coordinator('after_promote') self._last_state = new_state return ret def clone(self, clone_member: Union[Leader, Member, None] = None, msg: str = '(without leader)') -> Optional[bool]: if self.is_standby_cluster() and not isinstance(clone_member, RemoteMember): clone_member = self.get_remote_member(clone_member) self._rewind.reset_state() if self.state_handler.bootstrap.clone(clone_member): logger.info('bootstrapped %s', msg) cluster = self.dcs.get_cluster() node_to_follow = self._get_node_to_follow(cluster) return self.state_handler.follow(node_to_follow) is not False else: logger.error('failed to bootstrap %s', msg) self.state_handler.remove_data_directory() def bootstrap(self) -> str: # no initialize key and node is allowed to be primary and has 'bootstrap' section in a configuration 
file if self.cluster.is_unlocked() and self.cluster.initialize is None\ and not self.patroni.nofailover and 'bootstrap' in self.patroni.config: if self.dcs.initialize(create_new=True): # race for initialization self.state_handler.bootstrapping = True with self._async_response: self._async_response.reset() if self.is_standby_cluster(): ret = self._async_executor.try_run_async('bootstrap_standby_leader', self.bootstrap_standby_leader) return ret or 'trying to bootstrap a new standby leader' else: ret = self._async_executor.try_run_async('bootstrap', self.state_handler.bootstrap.bootstrap, args=(self.patroni.config['bootstrap'],)) return ret or 'trying to bootstrap a new cluster' else: return 'failed to acquire initialize lock' clone_member = self.cluster.get_clone_member(self.state_handler.name) # cluster already has a leader, we can bootstrap from it or from one of replicas (if they allow) if not self.cluster.is_unlocked() and clone_member: member_role = 'leader' if clone_member == self.cluster.leader else 'replica' msg = "from {0} '{1}'".format(member_role, clone_member.name) ret = self._async_executor.try_run_async('bootstrap {0}'.format(msg), self.clone, args=(clone_member, msg)) return ret or 'trying to bootstrap {0}'.format(msg) # no leader, but configuration may allowed replica creation using backup tools create_replica_methods = self.global_config.get_standby_cluster_config().get('create_replica_methods', []) \ if self.is_standby_cluster() else None can_bootstrap = self.state_handler.can_create_replica_without_replication_connection(create_replica_methods) concurrent_bootstrap = self.cluster.initialize == "" if can_bootstrap and not concurrent_bootstrap: msg = 'bootstrap (without leader)' return self._async_executor.try_run_async(msg, self.clone) or 'trying to ' + msg return 'waiting for {0}leader to bootstrap'.format('standby_' if self.is_standby_cluster() else '') def bootstrap_standby_leader(self) -> Optional[bool]: """ If we found 'standby' key in the 
configuration, we need to bootstrap not a real primary, but a 'standby leader', that will take base backup from a remote member and start follow it. """ clone_source = self.get_remote_member() msg = 'clone from remote member {0}'.format(clone_source.conn_url) result = self.clone(clone_source, msg) with self._async_response: # pretend that post_bootstrap was already executed self._async_response.complete(result) if result: self.state_handler.set_role('standby_leader') return result def _handle_crash_recovery(self) -> Optional[str]: if self._crash_recovery_started == 0 and (self.cluster.is_unlocked() or self._rewind.can_rewind): self._crash_recovery_started = time.time() msg = 'doing crash recovery in a single user mode' return self._async_executor.try_run_async(msg, self._rewind.ensure_clean_shutdown) or msg def _handle_rewind_or_reinitialize(self) -> Optional[str]: leader = self.get_remote_member() if self.is_standby_cluster() else self.cluster.leader if not self._rewind.rewind_or_reinitialize_needed_and_possible(leader) or not leader: return None if self._rewind.can_rewind: # rewind is required, but postgres wasn't shut down cleanly. if not self.state_handler.is_running() and \ self.state_handler.controldata().get('Database cluster state') == 'in archive recovery': msg = self._handle_crash_recovery() if msg: return msg msg = 'running pg_rewind from ' + leader.name return self._async_executor.try_run_async(msg, self._rewind.execute, args=(leader,)) or msg if self._rewind.should_remove_data_directory_on_diverged_timelines and not self.is_standby_cluster(): msg = 'reinitializing due to diverged timelines' return self._async_executor.try_run_async(msg, self._do_reinitialize, args=(self.cluster,)) or msg def recover(self) -> str: """Handle the case when postgres isn't running. 
Depending on the state of Patroni, DCS cluster view, and pg_controldata the following could happen: - if ``primary_start_timeout`` is 0 and this node owns the leader lock, the lock will be voluntarily released if there are healthy replicas to take it over. - if postgres was running as a ``primary`` and this node owns the leader lock, postgres is started as primary. - crash recover in a single-user mode is executed in the following cases: - postgres was running as ``primary`` wasn't ``shut down`` cleanly and there is no leader in DCS - postgres was running as ``replica`` wasn't ``shut down in recovery`` (cleanly) and we need to run ``pg_rewind`` to join back to the cluster. - ``pg_rewind`` is executed if it is necessary, or optinally, the data directory could be removed if it is allowed by configuration. - after ``crash recovery`` and/or ``pg_rewind`` are executed, postgres is started in recovery. :returns: action message, describing what was performed. """ if self.has_lock() and self.update_lock(): timeout = self.global_config.primary_start_timeout if timeout == 0: # We are requested to prefer failing over to restarting primary. But see first if there # is anyone to fail over to. if self.is_failover_possible(): self.watchdog.disable() logger.info("Primary crashed. 
Failing over.") self.demote('immediate') return 'stopped PostgreSQL to fail over after a crash' else: timeout = None data = self.state_handler.controldata() logger.info('pg_controldata:\n%s\n', '\n'.join(' {0}: {1}'.format(k, v) for k, v in data.items())) # timeout > 0 indicates that we still have the leader lock, and it was just updated if timeout\ and data.get('Database cluster state') in ('in production', 'in crash recovery', 'shutting down', 'shut down')\ and self.state_handler.state == 'crashed'\ and self.state_handler.role in ('primary', 'master')\ and not self.state_handler.config.recovery_conf_exists(): # We know 100% that we were running as a primary a few moments ago, therefore could just start postgres msg = 'starting primary after failure' if self._async_executor.try_run_async(msg, self.state_handler.start, args=(timeout, self._async_executor.critical_task)) is None: self.recovering = True return msg # Postgres is not running, and we will restart in standby mode. Watchdog is not needed until we promote. 
self.watchdog.disable() if data.get('Database cluster state') in ('in production', 'shutting down', 'in crash recovery'): msg = self._handle_crash_recovery() if msg: return msg self.load_cluster_from_dcs() role = 'replica' if self.has_lock() and not self.is_standby_cluster(): self._rewind.reset_state() # we want to later trigger CHECKPOINT after promote msg = "starting as readonly because i had the session lock" node_to_follow = None else: if not self._rewind.executed: self._rewind.trigger_check_diverged_lsn() msg = self._handle_rewind_or_reinitialize() if msg: return msg if self.has_lock(): # in standby cluster msg = "starting as a standby leader because i had the session lock" role = 'standby_leader' node_to_follow = self._get_node_to_follow(self.cluster) elif self.is_standby_cluster() and self.cluster.is_unlocked(): msg = "trying to follow a remote member because standby cluster is unhealthy" node_to_follow = self.get_remote_member() else: msg = "starting as a secondary" node_to_follow = self._get_node_to_follow(self.cluster) if self.is_synchronous_mode(): self.state_handler.sync_handler.set_synchronous_standby_names(CaseInsensitiveSet()) if self._async_executor.try_run_async('restarting after failure', self.state_handler.follow, args=(node_to_follow, role, timeout)) is None: self.recovering = True return msg def _get_node_to_follow(self, cluster: Cluster) -> Union[Leader, Member, None]: """Determine the node to follow. :param cluster: the currently known cluster state from DCS. :returns: the node which we should be replicating from. """ # The standby leader or when there is no standby leader we want to follow # the remote member, except when there is no standby leader in pause. 
        if self.is_standby_cluster() \
                and (cluster.leader and cluster.leader.name and cluster.leader.name == self.state_handler.name
                     or cluster.is_unlocked() and not self.is_paused()):
            node_to_follow = self.get_remote_member()
        # If replicatefrom tag is set, try to follow the node mentioned there, otherwise, follow the leader.
        elif self.patroni.replicatefrom and self.patroni.replicatefrom != self.state_handler.name:
            node_to_follow = cluster.get_member(self.patroni.replicatefrom)
        else:
            node_to_follow = cluster.leader if cluster.leader and cluster.leader.name else None

        # never try to follow ourselves
        node_to_follow = node_to_follow if node_to_follow and node_to_follow.name != self.state_handler.name else None

        if node_to_follow and not isinstance(node_to_follow, RemoteMember):
            # we are going to abuse Member.data to pass following parameters
            params = ('restore_command', 'archive_cleanup_command')
            for param in params:  # It is highly unlikely to happen, but we want to protect from the case
                node_to_follow.data.pop(param, None)  # when above-mentioned params came from outside.
            if self.is_standby_cluster():
                standby_config = self.global_config.get_standby_cluster_config()
                node_to_follow.data.update({p: standby_config[p] for p in params if standby_config.get(p)})

        return node_to_follow

    def follow(self, demote_reason: str, follow_reason: str, refresh: bool = True) -> str:
        """Reconfigure our node to replicate from the appropriate source (or demote a running primary).

        :param demote_reason: message returned if a running primary had to be demoted.
        :param follow_reason: message returned once the recovery configuration is in place.
        :param refresh: if ``True``, reload the cluster state from DCS before acting.

        :returns: a human-readable action message.
        """
        if refresh:
            self.load_cluster_from_dcs()

        is_leader = self.state_handler.is_primary()

        node_to_follow = self._get_node_to_follow(self.cluster)

        if self.is_paused():
            if not (self._rewind.is_needed and self._rewind.can_rewind_or_reinitialize_allowed)\
                    or self.cluster.is_unlocked():
                if is_leader:
                    self.state_handler.set_role('master')
                    return 'continue to run as primary without lock'
                elif self.state_handler.role != 'standby_leader':
                    self.state_handler.set_role('replica')
                if not node_to_follow:
                    return 'no action. I am ({0})'.format(self.state_handler.name)
        elif is_leader:
            # not paused and not the lock owner -> we must not keep running as a primary
            self.demote('immediate-nolock')
            return demote_reason

        if self.is_standby_cluster() and self._leader_timeline and \
                self.state_handler.get_history(self._leader_timeline + 1):
            self._rewind.trigger_check_diverged_lsn()

        if not self.state_handler.is_starting():
            msg = self._handle_rewind_or_reinitialize()
            if msg:
                return msg

        if not self.is_paused():
            self.state_handler.handle_parameter_change()

        role = 'standby_leader' if isinstance(node_to_follow, RemoteMember) and self.has_lock(False) else 'replica'
        # It might happen that leader key in the standby cluster references non-existing member.
        # In this case it is safe to continue running without changing recovery.conf
        if self.is_standby_cluster() and role == 'replica' and not (node_to_follow and node_to_follow.conn_url):
            return 'continue following the old known standby leader'
        else:
            change_required, restart_required = self.state_handler.config.check_recovery_conf(node_to_follow)
            if change_required:
                if restart_required:
                    self._async_executor.try_run_async('changing primary_conninfo and restarting',
                                                       self.state_handler.follow, args=(node_to_follow, role))
                else:
                    self.state_handler.follow(node_to_follow, role, do_reload=True)
                self._rewind.trigger_check_diverged_lsn()
            elif role == 'standby_leader' and self.state_handler.role != role:
                self.state_handler.set_role(role)
                self.state_handler.call_nowait(CallbackAction.ON_ROLE_CHANGE)

        return follow_reason

    def is_synchronous_mode(self) -> bool:
        """:returns: `True` if synchronous replication is requested."""
        return self.global_config.is_synchronous_mode

    def is_failsafe_mode(self) -> bool:
        """:returns: `True` if failsafe_mode is enabled in global configuration."""
        return self.global_config.check_mode('failsafe_mode')

    def process_sync_replication(self) -> None:
        """Process synchronous standby behavior.

        Synchronous standbys are registered in two places postgresql.conf and DCS. The order of updating them must
        be right. The invariant that should be kept is that if a node is primary and sync_standby is set in DCS,
        then that node must have synchronous_standby set to that value. Or more simple, first set in postgresql.conf
        and then in DCS. When removing, first remove in DCS, then in postgresql.conf. This is so we only consider
        promoting standbys that were guaranteed to be replicating synchronously.
        """
        if self.is_synchronous_mode():
            sync = self.cluster.sync
            if sync.is_empty:
                # corner case: we need to explicitly enable synchronous mode by updating the
                # ``/sync`` key with the current leader name and empty members. In opposite case
                # it will never be automatically enabled if there are not eligible candidates.
                sync = self.dcs.write_sync_state(self.state_handler.name, None, version=sync.version)
                if not sync:
                    return logger.warning("Updating sync state failed")
                logger.info("Enabled synchronous replication")

            current = CaseInsensitiveSet(sync.members)
            picked, allow_promote = self.state_handler.sync_handler.current_state(self.cluster)
            if picked == current and current != allow_promote:
                logger.warning('Inconsistent state between synchronous_standby_names = %s and /sync = %s key '
                               'detected, updating synchronous replication key...', list(allow_promote), list(current))
                sync = self.dcs.write_sync_state(self.state_handler.name, allow_promote, version=sync.version)
                if not sync:
                    return logger.warning("Updating sync state failed")
                current = CaseInsensitiveSet(sync.members)

            if picked != current:
                # update synchronous standby list in dcs temporarily to point to common nodes in current and picked
                sync_common = current & allow_promote
                if sync_common != current:
                    logger.info("Updating synchronous privilege temporarily from %s to %s",
                                list(current), list(sync_common))
                    sync = self.dcs.write_sync_state(self.state_handler.name, sync_common, version=sync.version)
                    if not sync:
                        return logger.info('Synchronous replication key updated by someone else.')

                # When strict mode and no suitable replication connections put "*" to synchronous_standby_names
                if self.global_config.is_synchronous_mode_strict and not picked:
                    picked = CaseInsensitiveSet('*')
                    logger.warning("No standbys available!")

                # Update postgresql.conf and wait 2 secs for changes to become active
                logger.info("Assigning synchronous standby status to %s", list(picked))
                self.state_handler.sync_handler.set_synchronous_standby_names(picked)

                if picked and picked != CaseInsensitiveSet('*') and allow_promote != picked:
                    # Wait for PostgreSQL to enable synchronous mode and see if we can immediately set sync_standby
                    time.sleep(2)
                    _, allow_promote = self.state_handler.sync_handler.current_state(self.cluster)

                if allow_promote and allow_promote != sync_common:
                    if not self.dcs.write_sync_state(self.state_handler.name, allow_promote, version=sync.version):
                        return logger.info("Synchronous replication key updated by someone else")
                    logger.info("Synchronous standby status assigned to %s", list(allow_promote))
        else:
            # synchronous mode disabled: remove the /sync key first, then clear postgresql.conf
            if not self.cluster.sync.is_empty and self.dcs.delete_sync_state(version=self.cluster.sync.version):
                logger.info("Disabled synchronous replication")
            self.state_handler.sync_handler.set_synchronous_standby_names(CaseInsensitiveSet())

    def is_sync_standby(self, cluster: Cluster) -> bool:
        """:returns: `True` if the current node is a synchronous standby."""
        return bool(cluster.leader) and cluster.sync.leader_matches(cluster.leader.name) \
            and cluster.sync.matches(self.state_handler.name)

    def while_not_sync_standby(self, func: Callable[..., Any]) -> Any:
        """Runs specified action while trying to make sure that the node is not assigned synchronous standby status.

        Tags us as not allowed to be a sync standby as we are going to go away, if we currently are wait for
        leader to notice and pick an alternative one or if the leader changes or goes away we are also free.

        If the connection to DCS fails we run the action anyway, as this is only a hint.

        There is a small race window where this function runs between a primary picking us the sync standby and
        publishing it to the DCS. As the window is rather tiny consequences are holding up commits for one cycle
        period we don't worry about it here."""
        if not self.is_synchronous_mode() or self.patroni.nosync:
            return func()

        with self._member_state_lock:
            self._disable_sync += 1
        try:
            if self.touch_member():
                # Primary should notice the updated value during the next cycle. We will wait double that, if primary
                # hasn't noticed the value by then not disabling sync replication is not likely to matter.
                for _ in polling_loop(timeout=self.dcs.loop_wait * 2, interval=2):
                    try:
                        if not self.is_sync_standby(self.dcs.get_cluster()):
                            break
                    except DCSError:
                        logger.warning("Could not get cluster state, skipping synchronous standby disable")
                        break
                    logger.info("Waiting for primary to release us from synchronous standby")
            else:
                logger.warning("Updating member state failed, skipping synchronous standby disable")

            return func()
        finally:
            with self._member_state_lock:
                self._disable_sync -= 1

    def update_cluster_history(self) -> None:
        """Update the timeline history stored in DCS, enriching new lines with timestamps already stored there."""
        primary_timeline = self.state_handler.get_primary_timeline()
        cluster_history = self.cluster.history.lines if self.cluster.history else []
        if primary_timeline == 1:
            # timeline 1 has no history yet; clear a stale value if one exists
            if cluster_history:
                self.dcs.set_history_value('[]')
        elif not cluster_history or cluster_history[-1][0] != primary_timeline - 1 or len(cluster_history[-1]) != 5:
            cluster_history_dict: Dict[int, List[Any]] = {line[0]: list(line) for line in cluster_history}
            history: List[List[Any]] = list(map(list, self.state_handler.get_history(primary_timeline)))
            if self.cluster.config:
                history = history[-self.cluster.config.max_timelines_history:]
            for line in history:
                # enrich current history with promotion timestamps stored in DCS
                cluster_history_line = cluster_history_dict.get(line[0], [])
                if len(line) == 3 and len(cluster_history_line) >= 4 and cluster_history_line[1] == line[1]:
                    line.append(cluster_history_line[3])
                    if len(cluster_history_line) == 5:
                        line.append(cluster_history_line[4])
            if history:
                self.dcs.set_history_value(json.dumps(history, separators=(',', ':')))

    def enforce_follow_remote_member(self, message: str) -> str:
        """Ensure we are following the remote member of a standby cluster, demoting first if necessary."""
        demote_reason = 'cannot be a real primary in standby cluster'
        return self.follow(demote_reason, message)

    def enforce_primary_role(self, message: str, promote_message: str) -> str:
        """
        Ensure the node that has won the race for the leader key meets criteria for promoting its PG server to the
        'primary' role.
        """
        if not self.is_paused():
            if not self.watchdog.is_running and not self.watchdog.activate():
                if self.state_handler.is_primary():
                    self.demote('immediate')
                    return 'Demoting self because watchdog could not be activated'
                else:
                    self.release_leader_key_voluntarily()
                    return 'Not promoting self because watchdog could not be activated'

        with self._async_response:
            if self._async_response.result is False:
                logger.warning("Releasing the leader key voluntarily because the pre-promote script failed")
                self._released_leader_key_timestamp = time.time()
                self.release_leader_key_voluntarily()
                # discard the result of the failed pre-promote script to be able to re-try promote
                self._async_response.reset()
                return 'Promotion cancelled because the pre-promote script failed'

        if self.state_handler.is_primary():
            # Inform the state handler about its primary role.
            # It may be unaware of it if postgres is promoted manually.
            self.state_handler.set_role('master')
            self.process_sync_replication()
            self.update_cluster_history()
            self.state_handler.citus_handler.sync_pg_dist_node(self.cluster)
            return message
        elif self.state_handler.role in ('master', 'promoted', 'primary'):
            self.process_sync_replication()
            return message
        else:
            if self.is_synchronous_mode():
                # Just set ourselves as the authoritative source of truth for now. We don't want to wait for standbys
                # to connect. We will try finding a synchronous standby in the next cycle.
                if not self.dcs.write_sync_state(self.state_handler.name, None, version=self.cluster.sync.version):
                    # Somebody else updated sync state, it may be due to us losing the lock. To be safe, postpone
                    # promotion until next cycle. TODO: trigger immediate retry of run_cycle
                    return 'Postponing promotion because synchronous replication state was updated by somebody else'
                self.state_handler.sync_handler.set_synchronous_standby_names(
                    CaseInsensitiveSet('*') if self.global_config.is_synchronous_mode_strict else CaseInsensitiveSet())

            if self.state_handler.role not in ('master', 'promoted', 'primary'):
                # reset failsafe state when promote
                self._failsafe.set_is_active(0)

            def before_promote():
                # notify the Citus coordinator and clear any stale pre-promote result before promoting
                self.notify_citus_coordinator('before_promote')
                with self._async_response:
                    self._async_response.reset()

            self._async_executor.try_run_async('promote', self.state_handler.promote,
                                               args=(self.dcs.loop_wait, self._async_response, before_promote))
            return promote_message

    def fetch_node_status(self, member: Member) -> _MemberStatus:
        """This function performs http get request on member.api_url and fetches its status.

        Any failure (network error, bad JSON, timeout) is downgraded to an "unknown" status.

        :returns: `_MemberStatus` object
        """
        try:
            response = self.patroni.request(member, timeout=2, retries=0)
            data = response.data.decode('utf-8')
            logger.info('Got response from %s %s: %s', member.name, member.api_url, data)
            return _MemberStatus.from_api_response(member, json.loads(data))
        except Exception as e:
            logger.warning("Request failed to %s: GET %s (%s)", member.name, member.api_url, e)
        return _MemberStatus.unknown(member)

    def fetch_nodes_statuses(self, members: List[Member]) -> List[_MemberStatus]:
        """Fetch REST API statuses of the given *members* in parallel, one thread per member."""
        if not members:
            return []
        pool = ThreadPool(len(members))
        results = pool.map(self.fetch_node_status, members)  # Run API calls on members in parallel
        pool.close()
        pool.join()
        return results

    def update_failsafe(self, data: Dict[str, Any]) -> Optional[str]:
        """Update failsafe state from the leader-provided *data*.

        :returns: an error message if this node itself is running as a leader, otherwise ``None``.
        """
        if self.state_handler.state == 'running' and self.state_handler.role in ('master', 'primary'):
            return 'Running as a leader'
        self._failsafe.update(data)

    def failsafe_is_active(self) -> bool:
        """:returns: ``True`` if the failsafe mode is currently active."""
        return self._failsafe.is_active()

    def call_failsafe_member(self, data: Dict[str, Any], member: Member) -> bool:
        """POST the failsafe payload *data* to a single *member*'s REST API.

        :returns: ``True`` only on HTTP 200 with the response body ``Accepted``.
        """
        try:
            response = self.patroni.request(member, 'post', 'failsafe', data, timeout=2, retries=1)
            response_data = response.data.decode('utf-8')
            logger.info('Got response from %s %s: %s', member.name, member.api_url, response_data)
            return response.status == 200 and response_data == 'Accepted'
        except Exception as e:
            logger.warning("Request failed to %s: POST %s (%s)", member.name, member.api_url, e)
        return False

    def check_failsafe_topology(self) -> bool:
        """Check whether we could continue to run as a primary by calling all members from the failsafe topology.

        .. note::
            If the ``/failsafe`` key contains invalid data or if the ``name`` of our node is missing in the
            ``/failsafe`` key, we immediately give up and return ``False``.

            We send the JSON document in the POST request with the following fields:

            * ``name`` - the name of our node;
            * ``conn_url`` - connection URL to the postgres, which is reachable from other nodes;
            * ``api_url`` - connection URL to Patroni REST API on this node reachable from other nodes;
            * ``slots`` - a :class:`dict` with replication slots that exist on the leader node, including the primary
              itself with the last known LSN, because there could be a permanent physical slot on standby nodes.

            Standby nodes are using information from the ``slots`` dict to advance position of permanent
            replication slots while DCS is not accessible in order to avoid indefinite growth of ``pg_wal``.

        :returns: ``True`` if all members from the ``/failsafe`` topology agree that this node could continue to
                  run as a ``primary``, or ``False`` if some of standby nodes are not accessible or don't agree.
        """
        failsafe = self.dcs.failsafe
        if not isinstance(failsafe, dict) or self.state_handler.name not in failsafe:
            return False
        data: Dict[str, Any] = {
            'name': self.state_handler.name,
            'conn_url': self.state_handler.connection_string,
            'api_url': self.patroni.api.connection_string,
        }
        try:
            data['slots'] = {
                **self.state_handler.slots(),
                slot_name_from_member_name(self.state_handler.name): self.state_handler.last_operation()
            }
        except Exception:
            logger.exception('Exception when called state_handler.slots()')
        members = [RemoteMember(name, {'api_url': url})
                   for name, url in failsafe.items() if name != self.state_handler.name]
        if not members:  # A single node cluster
            return True
        pool = ThreadPool(len(members))
        call_failsafe_member = functools.partial(self.call_failsafe_member, data)
        results = pool.map(call_failsafe_member, members)
        pool.close()
        pool.join()
        return all(results)

    def is_lagging(self, wal_position: int) -> bool:
        """Returns if instance with an wal should consider itself unhealthy to be promoted due to replication lag.

        :param wal_position: Current wal position.

        :returns True when node is lagging
        """
        lag = (self.cluster.last_lsn or 0) - wal_position
        return lag > self.global_config.maximum_lag_on_failover

    def _is_healthiest_node(self, members: Collection[Member], check_replication_lag: bool = True) -> bool:
        """This method tries to determine whether I am healthy enough to become a new leader candidate or not."""
        my_wal_position = self.state_handler.last_operation()
        if check_replication_lag and self.is_lagging(my_wal_position):
            logger.info('My wal position exceeds maximum replication lag')
            return False  # Too far behind last reported wal position on primary

        if not self.is_standby_cluster() and self.check_timeline():
            cluster_timeline = self.cluster.timeline
            my_timeline = self.state_handler.replica_cached_timeline(cluster_timeline)
            if my_timeline is None:
                logger.info('Can not figure out my timeline')
                return False
            if my_timeline < cluster_timeline:
                logger.info('My timeline %s is behind last known cluster timeline %s',
                            my_timeline, cluster_timeline)
                return False

        # Prepare list of nodes to run check against
        members = [m for m in members if m.name != self.state_handler.name and not m.nofailover and m.api_url]

        for st in self.fetch_nodes_statuses(members):
            if st.failover_limitation() is None:
                if st.in_recovery is False:
                    logger.warning('Primary (%s) is still alive', st.member.name)
                    return False
                if my_wal_position < st.wal_position:
                    logger.info('Wal position of %s is ahead of my wal position', st.member.name)
                    # In synchronous mode the former leader might be still accessible and even be ahead of us.
                    # We should not disqualify ourselves from the leader race in such a situation.
                    if not self.sync_mode_is_active() or not self.cluster.sync.leader_matches(st.member.name):
                        return False
                    logger.info('Ignoring the former leader being ahead of us')
                if my_wal_position == st.wal_position and self.patroni.failover_priority < st.failover_priority:
                    # There's a higher priority non-lagging replica
                    logger.info(
                        '%s has equally tolerable WAL position and priority %s, while this node has priority %s',
                        st.member.name, st.failover_priority, self.patroni.failover_priority,
                    )
                    return False
        return True

    def is_failover_possible(self, *, cluster_lsn: int = 0, exclude_failover_candidate: bool = False) -> bool:
        """Checks whether any of the cluster members is allowed to promote and is healthy enough for that.

        :param cluster_lsn: to calculate replication lag and exclude member if it is lagging.
        :param exclude_failover_candidate: if ``True``, exclude :attr:`failover.candidate` from the members
                                           list against which the failover possibility checks are run.
        :returns: `True` if there are members eligible to become the new leader.
        """
        candidates = self.get_failover_candidates(exclude_failover_candidate)
        action = self._get_failover_action_name()

        if self.is_synchronous_mode() and self.cluster.failover and self.cluster.failover.candidate and not candidates:
            logger.warning('%s candidate=%s does not match with sync_standbys=%s',
                           action.title(), self.cluster.failover.candidate, self.cluster.sync.sync_standby)
        elif not candidates:
            logger.warning('%s: candidates list is empty', action)

        # a single healthy candidate is enough to answer "possible"
        ret = False
        cluster_timeline = self.cluster.timeline
        for st in self.fetch_nodes_statuses(candidates):
            not_allowed_reason = st.failover_limitation()
            if not_allowed_reason:
                logger.info('Member %s is %s', st.member.name, not_allowed_reason)
            elif cluster_lsn and st.wal_position < cluster_lsn or \
                    not cluster_lsn and self.is_lagging(st.wal_position):
                logger.info('Member %s exceeds maximum replication lag', st.member.name)
            elif self.check_timeline() and (not st.timeline or st.timeline < cluster_timeline):
                logger.info('Timeline %s of member %s is behind the cluster timeline %s',
                            st.timeline, st.member.name, cluster_timeline)
            else:
                ret = True
        return ret

    def manual_failover_process_no_leader(self) -> Optional[bool]:
        """Handles manual failover/switchover when the old leader already stepped down.

        :returns:
            - `True` if the current node is the best candidate to become the new leader
            - `None` if the current node is running as a primary and requested candidate doesn't exist
        """
        failover = self.cluster.failover
        if TYPE_CHECKING:  # pragma: no cover
            assert failover is not None
        action = self._get_failover_action_name()

        if failover.candidate:  # manual failover/switchover to specific member
            if failover.candidate == self.state_handler.name:  # manual failover/switchover to me
                return True
            elif self.is_paused():
                # Remove failover key if the node to failover has terminated to avoid waiting for it indefinitely
                # In order to avoid attempts to delete this key from all nodes only the primary is allowed to do it.
                if not self.cluster.get_member(failover.candidate, fallback_to_leader=False)\
                        and self.state_handler.is_primary():
                    logger.warning("%s: removing failover key because failover candidate is not running", action)
                    self.dcs.manual_failover('', '', version=failover.version)
                    return None
                return False

            # in synchronous mode when our name is not in the /sync key
            # we shouldn't take any action even if the candidate is unhealthy
            if self.is_synchronous_mode() and not self.cluster.sync.matches(self.state_handler.name, True):
                return False

            # find specific node and check that it is healthy
            member = self.cluster.get_member(failover.candidate, fallback_to_leader=False)
            if isinstance(member, Member):
                st = self.fetch_node_status(member)
                not_allowed_reason = st.failover_limitation()
                if not_allowed_reason is None:  # node is healthy
                    logger.info('%s: to %s, i am %s', action, st.member.name, self.state_handler.name)
                    return False
                # we wanted to failover/switchover to specific member but it is not healthy
                logger.warning('%s: member %s is %s', action, st.member.name, not_allowed_reason)

            # at this point we should consider all members as a candidates for failover/switchover
            # i.e. we assume that failover.candidate is None
        elif self.is_paused():
            return False

        # try to pick some other members for switchover and check that they are healthy
        if failover.leader:
            if self.state_handler.name == failover.leader:  # I was the leader
                # exclude desired member which is unhealthy if it was specified
                if self.is_failover_possible(exclude_failover_candidate=bool(failover.candidate)):
                    return False
                else:  # I was the leader and it looks like currently I am the only healthy member
                    return True

        # at this point we assume that our node is a candidate for a failover among all nodes except former leader

        # exclude former leader from the list (failover.leader can be None)
        members = [m for m in self.cluster.members if m.name != failover.leader]
        return self._is_healthiest_node(members, check_replication_lag=False)

    def is_healthiest_node(self) -> bool:
        """Performs a series of checks to determine that the current node is the best candidate.

        In case if manual failover/switchover is requested it calls :func:`manual_failover_process_no_leader` method.

        :returns: `True` if the current node is among the best candidates to become the new leader.
        """
        if time.time() - self._released_leader_key_timestamp < self.dcs.ttl:
            logger.info('backoff: skip leader race after pre_promote script failure and releasing the lock voluntarily')
            return False

        if self.is_paused() and not self.patroni.nofailover and \
                self.cluster.failover and not self.cluster.failover.scheduled_at:
            ret = self.manual_failover_process_no_leader()
            if ret is not None:  # continue if we just deleted the stale failover key as a leader
                return ret

        if self.state_handler.is_primary():
            if self.is_paused():
                # in pause leader is the healthiest only when no initialize or sysid matches with initialize!
                return not self.cluster.initialize or self.state_handler.sysid == self.cluster.initialize

            # We want to protect from the following scenario:
            # 1. node1 is stressed so much that heart-beat isn't running regularly and the leader lock expires.
            # 2. node2 promotes, gets heavy load and the situation described in 1 repeats.
            # 3. Patroni on node1 comes back, notices that Postgres is running as primary but there is
            #    no leader key and "happily" acquires the leader lock.
            # That is, node1 discarded promotion of node2. To avoid it we want to detect timeline change.
            my_timeline = self.state_handler.get_primary_timeline()
            if my_timeline < self.cluster.timeline:
                logger.warning('My timeline %s is behind last known cluster timeline %s',
                               my_timeline, self.cluster.timeline)
                return False
            return True

        if self.is_paused():
            return False

        if self.patroni.nofailover:  # nofailover tag makes node always unhealthy
            return False

        if self.cluster.failover:
            # When doing a switchover in synchronous mode only synchronous nodes and former leader are allowed to race
            if self.cluster.failover.leader and self.sync_mode_is_active() \
                    and not self.cluster.sync.matches(self.state_handler.name, True):
                return False
            return self.manual_failover_process_no_leader() or False

        if not self.watchdog.is_healthy:
            logger.warning('Watchdog device is not usable')
            return False

        all_known_members = self.old_cluster.members
        if self.is_failsafe_mode():
            failsafe_members = self.dcs.failsafe
            # We want to discard failsafe_mode if the /failsafe key contains garbage or empty.
            if isinstance(failsafe_members, dict):
                # If current node is missing in the /failsafe key we immediately disqualify it from the race.
                if failsafe_members and self.state_handler.name not in failsafe_members:
                    return False
                # Race among not only existing cluster members, but also all known members from the failsafe config
                all_known_members += [RemoteMember(name, {'api_url': url}) for name, url in failsafe_members.items()]
        all_known_members += self.cluster.members

        # When in sync mode, only last known primary and sync standby are allowed to promote automatically.
        if self.sync_mode_is_active():
            if not self.cluster.sync.matches(self.state_handler.name, True):
                return False
            # pick between synchronous candidates so we minimize unnecessary failovers/demotions
            members = {m.name: m for m in all_known_members if self.cluster.sync.matches(m.name, True)}
        else:
            # run usual health check
            members = {m.name: m for m in all_known_members}

        return self._is_healthiest_node(members.values())

    def _delete_leader(self, last_lsn: Optional[int] = None) -> None:
        """Drop our leader flag and remove the leader key from DCS, forgetting the cached cluster state."""
        self.set_is_leader(False)
        self.dcs.delete_leader(self.cluster.leader, last_lsn)
        self.dcs.reset_cluster()

    def release_leader_key_voluntarily(self, last_lsn: Optional[int] = None) -> None:
        """Give up the leader key and immediately publish our updated member state."""
        self._delete_leader(last_lsn)
        self.touch_member()
        logger.info("Leader key released")

    def demote(self, mode: str) -> Optional[bool]:
        """Demote PostgreSQL running as primary.

        :param mode: One of offline, graceful, immediate or immediate-nolock.
                     ``offline`` is used when connection to DCS is not available.
                     ``graceful`` is used when failing over to another node due to user request. May only be called
                     running async.
                     ``immediate`` is used when we determine that we are not suitable for primary and want to failover
                     quickly without regard for data durability. May only be called synchronously.
                     ``immediate-nolock`` is used when find out that we have lost the lock to be primary. Need to bring
                     down PostgreSQL as quickly as possible without regard for data durability. May only be called
                     synchronously.
        """
        # per-mode behavior matrix: how to stop postgres, whether to checkpoint,
        # whether to release the leader key, and whether to restart asynchronously
        mode_control = {
            'offline':          dict(stop='fast',      checkpoint=False, release=False, offline=True,  async_req=False),  # noqa: E241,E501
            'graceful':         dict(stop='fast',      checkpoint=True,  release=True,  offline=False, async_req=False),  # noqa: E241,E501
            'immediate':        dict(stop='immediate', checkpoint=False, release=True,  offline=False, async_req=True),  # noqa: E241,E501
            'immediate-nolock': dict(stop='immediate', checkpoint=False, release=False, offline=False, async_req=True),  # noqa: E241,E501
        }[mode]

        logger.info('Demoting self (%s)', mode)

        self._rewind.trigger_check_diverged_lsn()

        status = {'released': False}

        def on_shutdown(checkpoint_location: int, prev_location: int) -> None:
            # Postmaster is still running, but pg_control already reports clean "shut down".
            # It could happen if Postgres is still archiving the backlog of WAL files.
            # If we know that there are replicas that received the shutdown checkpoint
            # location, we can remove the leader key and allow them to start leader race.
            time.sleep(1)  # give replicas some more time to catch up
            if self.is_failover_possible(cluster_lsn=checkpoint_location):
                self.state_handler.set_role('demoted')
                with self._async_executor:
                    self.release_leader_key_voluntarily(prev_location)
                    status['released'] = True

        def before_shutdown() -> None:
            if self.state_handler.citus_handler.is_coordinator():
                self.state_handler.citus_handler.on_demote()
            else:
                self.notify_citus_coordinator('before_demote')

        self.state_handler.stop(str(mode_control['stop']), checkpoint=bool(mode_control['checkpoint']),
                                on_safepoint=self.watchdog.disable if self.watchdog.is_running else None,
                                on_shutdown=on_shutdown if mode_control['release'] else None,
                                before_shutdown=before_shutdown if mode == 'graceful' else None,
                                stop_timeout=self.primary_stop_timeout())
        self.state_handler.set_role('demoted')
        self.set_is_leader(False)

        if mode_control['release']:
            if not status['released']:
                checkpoint_location = self.state_handler.latest_checkpoint_location() if mode == 'graceful' else None
                with self._async_executor:
                    self.release_leader_key_voluntarily(checkpoint_location)
            time.sleep(2)  # Give a time to somebody to take the leader lock

        if mode_control['offline']:
            node_to_follow, leader = None, None
        else:
            try:
                cluster = self.dcs.get_cluster()
                node_to_follow, leader = self._get_node_to_follow(cluster), cluster.leader
            except Exception:
                node_to_follow, leader = None, None

        if self.is_synchronous_mode():
            self.state_handler.sync_handler.set_synchronous_standby_names(CaseInsensitiveSet())

        # FIXME: with mode offline called from DCS exception handler and handle_long_action_in_progress
        # there could be an async action already running, calling follow from here will lead
        # to racy state handler state updates.
        if mode_control['async_req']:
            self._async_executor.try_run_async('starting after demotion', self.state_handler.follow, (node_to_follow,))
        else:
            if self._rewind.rewind_or_reinitialize_needed_and_possible(leader):
                return False  # do not start postgres, but run pg_rewind on the next iteration
            self.state_handler.follow(node_to_follow)

    def should_run_scheduled_action(self, action_name: str, scheduled_at: Optional[datetime.datetime],
                                    cleanup_fn: Callable[..., Any]) -> bool:
        """Decide whether a scheduled *action_name* is due and should run right now.

        :param action_name: human-readable action name, used only in log messages.
        :param scheduled_at: the requested (timezone-aware) execution time, or ``None``.
        :param cleanup_fn: callback invoked to remove a stale or invalid schedule value.

        :returns: ``True`` only when the scheduled time is (nearly) due and we are not paused.
        """
        if scheduled_at and not self.is_paused():
            # If the scheduled action is in the far future, we shouldn't do anything and just return.
            # If the scheduled action is in the past, we consider the value to be stale and we remove
            # the value.
            # If the value is close to now, we initiate the scheduled action
            # Additionally, if the scheduled action cannot be executed altogether, i.e. there is an error
            # or the action is in the past - we take care of cleaning it up.
            now = datetime.datetime.now(tzutc)
            try:
                delta = (scheduled_at - now).total_seconds()

                if delta > self.dcs.loop_wait:
                    logger.info('Awaiting %s at %s (in %.0f seconds)',
                                action_name, scheduled_at.isoformat(), delta)
                    return False
                elif delta < - int(self.dcs.loop_wait * 1.5):
                    # This means that if run_cycle gets delayed for 2.5x loop_wait we skip the
                    # scheduled action. Probably not a problem, if things are that bad we don't
                    # want to be restarting or failing over anyway.
                    logger.warning('Found a stale %s value, cleaning up: %s',
                                   action_name, scheduled_at.isoformat())
                    cleanup_fn()
                    return False

                # The value is very close to now
                time.sleep(max(delta, 0))
                logger.info('Manual scheduled {0} at %s'.format(action_name), scheduled_at.isoformat())
                return True
            except TypeError:
                logger.warning('Incorrect value of scheduled_at: %s', scheduled_at)
                cleanup_fn()
        return False

    def process_manual_failover_from_leader(self) -> Optional[str]:
        """Checks if manual failover is requested and takes action if appropriate.

        Cleans up failover key if failover conditions are not matched.

        :returns: action message if demote was initiated, None if no action was taken"""
        failover = self.cluster.failover

        # if there is no failover key or
        # I am holding the lock but am not primary = I am the standby leader,
        # then do nothing
        if not failover or (self.is_paused() and not self.state_handler.is_primary()):
            return

        action = self._get_failover_action_name()
        bare_action = action.replace('manual ', '')

        # it is not the time for the scheduled switchover yet, do nothing
        if (failover.scheduled_at and not
                self.should_run_scheduled_action(bare_action, failover.scheduled_at,
                                                 lambda: self.dcs.manual_failover('', '', version=failover.version))):
            return

        if not failover.leader or failover.leader == self.state_handler.name:
            if not failover.candidate or failover.candidate != self.state_handler.name:
                if not failover.candidate and self.is_paused():
                    logger.warning('%s is possible only to a specific candidate in a paused state', action.title())
                elif self.is_failover_possible():
                    ret = self._async_executor.try_run_async(f'{action}: demote', self.demote, ('graceful',))
                    return ret or f'{action}: demoting myself'
                else:
                    logger.warning('%s: no healthy members found, %s is not possible', action, bare_action)
            else:
                logger.warning('%s: I am already the leader, no need to %s', action, bare_action)
        else:
            logger.warning('%s: leader name does not match: %s != %s',
                           action, failover.leader, self.state_handler.name)

        # conditions for the requested failover were not met -- drop the stale key
        logger.info('Cleaning up failover key')
        self.dcs.manual_failover('', '', version=failover.version)

    def process_unhealthy_cluster(self) -> str:
        """Cluster has no leader key"""

        if self.is_healthiest_node():
            if self.acquire_lock():
                failover = self.cluster.failover
                if failover:
                    if self.is_paused() and failover.leader and failover.candidate:
                        logger.info('Updating failover key after acquiring leader lock...')
                        self.dcs.manual_failover('', failover.candidate, failover.scheduled_at, failover.version)
                    else:
                        logger.info('Cleaning up failover key after acquiring leader lock...')
self.dcs.manual_failover('', '') self.load_cluster_from_dcs() if self.is_standby_cluster(): # standby leader disappeared, and this is the healthiest # replica, so it should become a new standby leader. # This implies we need to start following a remote member msg = 'promoted self to a standby leader by acquiring session lock' return self.enforce_follow_remote_member(msg) else: return self.enforce_primary_role( 'acquired session lock as a leader', 'promoted self to leader by acquiring session lock' ) else: return self.follow('demoted self after trying and failing to obtain lock', 'following new leader after trying and failing to obtain lock') else: # when we are doing manual failover there is no guaranty that new leader is ahead of any other node # node tagged as nofailover can be ahead of the new leader either, but it is always excluded from elections if bool(self.cluster.failover) or self.patroni.nofailover: self._rewind.trigger_check_diverged_lsn() time.sleep(2) # Give a time to somebody to take the leader lock if self.patroni.nofailover: return self.follow('demoting self because I am not allowed to become primary', 'following a different leader because I am not allowed to promote') return self.follow('demoting self because i am not the healthiest node', 'following a different leader because i am not the healthiest node') def process_healthy_cluster(self) -> str: if self.has_lock(): if self.is_paused() and not self.state_handler.is_primary(): if self.cluster.failover and self.cluster.failover.candidate == self.state_handler.name: return 'waiting to become primary after promote...' 
if not self.is_standby_cluster(): self._delete_leader() return 'removed leader lock because postgres is not running as primary' # update lock to avoid split-brain if self.update_lock(True): msg = self.process_manual_failover_from_leader() if msg is not None: return msg # check if the node is ready to be used by pg_rewind self._rewind.ensure_checkpoint_after_promote(self.wakeup) if self.is_standby_cluster(): # in case of standby cluster we don't really need to # enforce anything, since the leader is not a primary # So just remind the role. msg = 'no action. I am ({0}), the standby leader with the lock'.format(self.state_handler.name) \ if self.state_handler.role == 'standby_leader' else \ 'promoted self to a standby leader because i had the session lock' return self.enforce_follow_remote_member(msg) else: return self.enforce_primary_role( 'no action. I am ({0}), the leader with the lock'.format(self.state_handler.name), 'promoted self to leader because I had the session lock' ) else: # Either there is no connection to DCS or someone else acquired the lock logger.error('failed to update leader lock') if self.state_handler.is_primary(): if self.is_paused(): return 'continue to run as primary after failing to update leader lock in DCS' self.demote('immediate-nolock') return 'demoted self because failed to update leader lock in DCS' else: return 'not promoting because failed to update leader lock in DCS' else: logger.debug('does not have lock') lock_owner = self.cluster.leader and self.cluster.leader.name if self.is_standby_cluster(): return self.follow('cannot be a real primary in a standby cluster', 'no action. I am ({0}), a secondary, and following a standby leader ({1})'.format( self.state_handler.name, lock_owner), refresh=False) return self.follow('demoting self because I do not have the lock and I was a leader', 'no action. 
I am ({0}), a secondary, and following a leader ({1})'.format( self.state_handler.name, lock_owner), refresh=False) def evaluate_scheduled_restart(self) -> Optional[str]: if self._async_executor.busy: # Restart already in progress return None # restart if we need to restart_data = self.future_restart_scheduled() if restart_data: recent_time = self.state_handler.postmaster_start_time() request_time = restart_data['postmaster_start_time'] # check if postmaster start time has changed since the last restart if recent_time and request_time and recent_time != request_time: logger.info("Cancelling scheduled restart: postgres restart has already happened at %s", recent_time) self.delete_future_restart() return None if restart_data\ and self.should_run_scheduled_action('restart', restart_data['schedule'], self.delete_future_restart): try: ret, message = self.restart(restart_data, run_async=True) if not ret: logger.warning("Scheduled restart: %s", message) return None return message finally: self.delete_future_restart() def restart_matches(self, role: Optional[str], postgres_version: Optional[str], pending_restart: bool) -> bool: reason_to_cancel = "" # checking the restart filters here seem to be less ugly than moving them into the # run_scheduled_action. 
if role and role != self.state_handler.role: reason_to_cancel = "host role mismatch" if postgres_version and postgres_version_to_int(postgres_version) <= int(self.state_handler.server_version): reason_to_cancel = "postgres version mismatch" if pending_restart and not self.state_handler.pending_restart: reason_to_cancel = "pending restart flag is not set" if not reason_to_cancel: return True else: logger.info("not proceeding with the restart: %s", reason_to_cancel) return False def schedule_future_restart(self, restart_data: Dict[str, Any]) -> bool: with self._async_executor: restart_data['postmaster_start_time'] = self.state_handler.postmaster_start_time() if not self.patroni.scheduled_restart: self.patroni.scheduled_restart = restart_data self.touch_member() return True return False def delete_future_restart(self) -> bool: ret = False with self._async_executor: if self.patroni.scheduled_restart: self.patroni.scheduled_restart = {} self.touch_member() ret = True return ret def future_restart_scheduled(self) -> Dict[str, Any]: return self.patroni.scheduled_restart.copy() def restart_scheduled(self) -> bool: return self._async_executor.scheduled_action == 'restart' def restart(self, restart_data: Dict[str, Any], run_async: bool = False) -> Tuple[bool, str]: """ conditional and unconditional restart """ assert isinstance(restart_data, dict) if (not self.restart_matches(restart_data.get('role'), restart_data.get('postgres_version'), ('restart_pending' in restart_data))): return (False, "restart conditions are not satisfied") with self._async_executor: prev = self._async_executor.schedule('restart') if prev is not None: return (False, prev + ' already in progress') # Make the main loop to think that we were recovering dead postgres. If we fail # to start postgres after a specified timeout (see below), we need to remove # leader key (if it belong to us) rather than trying to start postgres once again. 
self.recovering = True # Now that restart is scheduled we can set timeout for startup, it will get reset # once async executor runs and main loop notices PostgreSQL as up. timeout = restart_data.get('timeout', self.global_config.primary_start_timeout) self.set_start_timeout(timeout) def before_shutdown() -> None: self.notify_citus_coordinator('before_demote') def after_start() -> None: self.notify_citus_coordinator('after_promote') # For non async cases we want to wait for restart to complete or timeout before returning. do_restart = functools.partial(self.state_handler.restart, timeout, self._async_executor.critical_task, before_shutdown=before_shutdown if self.has_lock() else None, after_start=after_start if self.has_lock() else None) if self.is_synchronous_mode() and not self.has_lock(): do_restart = functools.partial(self.while_not_sync_standby, do_restart) if run_async: self._async_executor.run_async(do_restart) return (True, 'restart initiated') else: res = self._async_executor.run(do_restart) if res: return (True, 'restarted successfully') elif res is None: return (False, 'postgres is still starting') else: return (False, 'restart failed') def _do_reinitialize(self, cluster: Cluster) -> Optional[bool]: self.state_handler.stop('immediate', stop_timeout=self.patroni.config['retry_timeout']) # Commented redundant data directory cleanup here # self.state_handler.remove_data_directory() clone_member = cluster.get_clone_member(self.state_handler.name) if clone_member: member_role = 'leader' if clone_member == cluster.leader else 'replica' return self.clone(clone_member, "from {0} '{1}'".format(member_role, clone_member.name)) def reinitialize(self, force: bool = False) -> Optional[str]: with self._async_executor: self.load_cluster_from_dcs() if self.cluster.is_unlocked(): return 'Cluster has no leader, can not reinitialize' if self.has_lock(False): return 'I am the leader, can not reinitialize' cluster = self.cluster if force: self._async_executor.cancel() with 
self._async_executor: action = self._async_executor.schedule('reinitialize') if action is not None: return '{0} already in progress'.format(action) self._async_executor.run_async(self._do_reinitialize, args=(cluster, )) def handle_long_action_in_progress(self) -> str: """Figure out what to do with the task AsyncExecutor is performing.""" if self.has_lock() and self.update_lock(): if self._async_executor.scheduled_action == 'doing crash recovery in a single user mode': time_left = self.global_config.primary_start_timeout - (time.time() - self._crash_recovery_started) if time_left <= 0 and self.is_failover_possible(): logger.info("Demoting self because crash recovery is taking too long") self.state_handler.cancellable.cancel(True) self.demote('immediate') return 'terminated crash recovery because of startup timeout' return 'updated leader lock during {0}'.format(self._async_executor.scheduled_action) elif not self.state_handler.bootstrapping and not self.is_paused(): # Don't have lock, make sure we are not promoting or starting up a primary in the background if self._async_executor.scheduled_action == 'promote': with self._async_response: cancel = self._async_response.cancel() if cancel: self.state_handler.cancellable.cancel() return 'lost leader before promote' if self.state_handler.role in ('master', 'primary'): logger.info('Demoting primary during %s', self._async_executor.scheduled_action) if self._async_executor.scheduled_action in ('restart', 'starting primary after failure'): # Restart needs a special interlocking cancel because postmaster may be just started in a # background thread and has not even written a pid file yet. 
with self._async_executor.critical_task as task: if not task.cancel() and isinstance(task.result, PostmasterProcess): self.state_handler.terminate_starting_postmaster(postmaster=task.result) self.demote('immediate-nolock') return 'lost leader lock during {0}'.format(self._async_executor.scheduled_action) if self.cluster.is_unlocked(): logger.info('not healthy enough for leader race') return '{0} in progress'.format(self._async_executor.scheduled_action) @staticmethod def sysid_valid(sysid: Optional[str]) -> bool: # sysid does tv_sec << 32, where tv_sec is the number of seconds sine 1970, # so even 1 << 32 would have 10 digits. sysid = str(sysid) return len(sysid) >= 10 and sysid.isdigit() def post_recover(self) -> Optional[str]: if not self.state_handler.is_running(): self.watchdog.disable() if self.has_lock(): if self.state_handler.role in ('master', 'primary', 'standby_leader'): self.state_handler.set_role('demoted') self._delete_leader() return 'removed leader key after trying and failing to start postgres' return 'failed to start postgres' return None def cancel_initialization(self) -> None: logger.info('removing initialize key after failed attempt to bootstrap the cluster') self.dcs.cancel_initialization() self.state_handler.stop('immediate', stop_timeout=self.patroni.config['retry_timeout']) self.state_handler.move_data_directory() raise PatroniFatalException('Failed to bootstrap cluster') def post_bootstrap(self) -> str: with self._async_response: result = self._async_response.result # bootstrap has failed if postgres is not running if not self.state_handler.is_running() or result is False: self.cancel_initialization() if result is None: if not self.state_handler.is_primary(): return 'waiting for end of recovery after bootstrap' self.state_handler.set_role('master') ret = self._async_executor.try_run_async('post_bootstrap', self.state_handler.bootstrap.post_bootstrap, args=(self.patroni.config['bootstrap'], self._async_response)) return ret or 'running 
post_bootstrap' self.state_handler.bootstrapping = False if not self.watchdog.activate(): logger.error('Cancelling bootstrap because watchdog activation failed') self.cancel_initialization() self._rewind.ensure_checkpoint_after_promote(self.wakeup) self.dcs.initialize(create_new=(self.cluster.initialize is None), sysid=self.state_handler.sysid) self.dcs.set_config_value(json.dumps(self.patroni.config.dynamic_configuration, separators=(',', ':'))) self.dcs.take_leader() self.set_is_leader(True) if self.is_synchronous_mode(): self.state_handler.sync_handler.set_synchronous_standby_names( CaseInsensitiveSet('*') if self.global_config.is_synchronous_mode_strict else CaseInsensitiveSet()) self.state_handler.call_nowait(CallbackAction.ON_START) self.load_cluster_from_dcs() return 'initialized a new cluster' def handle_starting_instance(self) -> Optional[str]: """Starting up PostgreSQL may take a long time. In case we are the leader we may want to fail over to.""" # Check if we are in startup, when paused defer to main loop for manual failovers. if not self.state_handler.check_for_startup() or self.is_paused(): self.set_start_timeout(None) if self.is_paused(): self.state_handler.set_state(self.state_handler.is_running() and 'running' or 'stopped') return None # state_handler.state == 'starting' here if self.has_lock(): if not self.update_lock(): logger.info("Lost lock while starting up. 
Demoting self.") self.demote('immediate-nolock') return 'stopped PostgreSQL while starting up because leader key was lost' timeout = self._start_timeout or self.global_config.primary_start_timeout time_left = timeout - self.state_handler.time_in_state() if time_left <= 0: if self.is_failover_possible(): logger.info("Demoting self because primary startup is taking too long") self.demote('immediate') return 'stopped PostgreSQL because of startup timeout' else: return 'primary start has timed out, but continuing to wait because failover is not possible' else: msg = self.process_manual_failover_from_leader() if msg is not None: return msg return 'PostgreSQL is still starting up, {0:.0f} seconds until timeout'.format(time_left) else: # Use normal processing for standbys logger.info("Still starting up as a standby.") return None def set_start_timeout(self, value: Optional[int]) -> None: """Sets timeout for starting as primary before eligible for failover. Must be called when async_executor is busy or in the main thread. 
""" self._start_timeout = value def _run_cycle(self) -> str: dcs_failed = False try: try: self.load_cluster_from_dcs() self.global_config = self.patroni.config.get_global_config(self.cluster) self.state_handler.reset_cluster_info_state(self.cluster, self.patroni.nofailover, self.global_config) except Exception: self.state_handler.reset_cluster_info_state(None) raise if self.is_paused(): self.watchdog.disable() self._was_paused = True else: if self._was_paused: self.state_handler.schedule_sanity_checks_after_pause() # during pause people could manually do something with Postgres, therefore we want # to double check rewind conditions on replicas and maybe run CHECKPOINT on the primary self._rewind.reset_state() self._was_paused = False if not self.cluster.has_member(self.state_handler.name): self.touch_member() # cluster has leader key but not initialize key if not (self.cluster.is_unlocked() or self.sysid_valid(self.cluster.initialize)) and self.has_lock(): self.dcs.initialize(create_new=(self.cluster.initialize is None), sysid=self.state_handler.sysid) if not (self.cluster.is_unlocked() or self.cluster.config and self.cluster.config.data) and self.has_lock(): self.dcs.set_config_value(json.dumps(self.patroni.config.dynamic_configuration, separators=(',', ':'))) self.cluster = self.dcs.get_cluster() if self._async_executor.busy: return self.handle_long_action_in_progress() msg = self.handle_starting_instance() if msg is not None: return msg # we've got here, so any async action has finished. 
if self.state_handler.bootstrapping: return self.post_bootstrap() if self.recovering: self.recovering = False if not self._rewind.is_needed: # Check if we tried to recover from postgres crash and failed msg = self.post_recover() if msg is not None: return msg # Reset some states after postgres successfully started up self._crash_recovery_started = 0 if self._rewind.executed and not self._rewind.failed: self._rewind.reset_state() # The Raft cluster without a quorum takes a bit of time to stabilize. # Therefore we want to postpone the leader race if we just started up. if self.cluster.is_unlocked() and self.dcs.__class__.__name__ == 'Raft': return 'started as a secondary' # is data directory empty? data_directory_error = '' data_directory_is_empty = None try: data_directory_is_empty = self.state_handler.data_directory_empty() data_directory_is_accessible = True except OSError as e: data_directory_is_accessible = False data_directory_error = e if not data_directory_is_accessible or data_directory_is_empty: self.state_handler.set_role('uninitialized') self.state_handler.stop('immediate', stop_timeout=self.patroni.config['retry_timeout']) # In case datadir went away while we were primary self.watchdog.disable() # is this instance the leader? 
if self.has_lock(): self.release_leader_key_voluntarily() return 'released leader key voluntarily as data dir {0} and currently leader'.format( 'empty' if data_directory_is_accessible else 'not accessible') if not data_directory_is_accessible: return 'data directory is not accessible: {0}'.format(data_directory_error) if self.is_paused(): return 'running with empty data directory' return self.bootstrap() # new node else: # check if we are allowed to join data_sysid = self.state_handler.sysid if not self.sysid_valid(data_sysid): # data directory is not empty, but no valid sysid, cluster must be broken, suggest reinit return ("data dir for the cluster is not empty, " "but system ID is invalid; consider doing reinitialize") if self.sysid_valid(self.cluster.initialize): if self.cluster.initialize != data_sysid: if self.is_paused(): logger.warning('system ID has changed while in paused mode. Patroni will exit when resuming' ' unless system ID is reset: %s != %s', self.cluster.initialize, data_sysid) if self.has_lock(): self.release_leader_key_voluntarily() return 'released leader key voluntarily due to the system ID mismatch' else: logger.fatal('system ID mismatch, node %s belongs to a different cluster: %s != %s', self.state_handler.name, self.cluster.initialize, data_sysid) sys.exit(1) elif self.cluster.is_unlocked() and not self.is_paused() and not self.state_handler.cb_called: # "bootstrap", but data directory is not empty if self.state_handler.is_running() and not self.state_handler.is_primary(): self._join_aborted = True logger.error('No initialize key in DCS and PostgreSQL is running as replica, aborting start') logger.error('Please first start Patroni on the node running as primary') sys.exit(1) self.dcs.initialize(create_new=(self.cluster.initialize is None), sysid=data_sysid) if not self.state_handler.is_healthy(): if self.is_paused(): self.state_handler.set_state('stopped') if self.has_lock(): self._delete_leader() return 'removed leader lock because postgres 
is not running' # Normally we don't start Postgres in a paused state. We make an exception for the demoted primary # that needs to be started after it had been stopped by demote. When there is no need to call rewind # the demote code follows through to starting Postgres right away, however, in the rewind case # it returns from demote and reaches this point to start PostgreSQL again after rewind. In that # case it makes no sense to continue to recover() unless rewind has finished successfully. elif self._rewind.failed or not self._rewind.executed and not \ (self._rewind.is_needed and self._rewind.can_rewind_or_reinitialize_allowed): return 'postgres is not running' if self.state_handler.state in ('running', 'starting'): self.state_handler.set_state('crashed') # try to start dead postgres return self.recover() if self.cluster.is_unlocked(): ret = self.process_unhealthy_cluster() else: msg = self.process_healthy_cluster() ret = self.evaluate_scheduled_restart() or msg # We might not have a valid PostgreSQL connection here if AsyncExecutor is doing # something with PostgreSQL. Therefore we will sync replication slots only if no # asynchronous processes are running or we know that this is a standby being promoted. # But, we don't want to run pg_rewind checks or copy logical slots from itself, # therefore we have a couple additional `not is_promoting` checks. 
is_promoting = self._async_executor.scheduled_action == 'promote' if (not self._async_executor.busy or is_promoting) and not self.state_handler.is_starting(): create_slots = self._sync_replication_slots(False) if not self.state_handler.cb_called: if not is_promoting and not self.state_handler.is_primary(): self._rewind.trigger_check_diverged_lsn() self.state_handler.call_nowait(CallbackAction.ON_START) if not is_promoting and create_slots and self.cluster.leader: err = self._async_executor.try_run_async('copy_logical_slots', self.state_handler.slots_handler.copy_logical_slots, args=(self.cluster, create_slots)) if not err: ret = 'Copying logical slots {0} from the primary'.format(create_slots) return ret except DCSError: dcs_failed = True logger.error('Error communicating with DCS') return self._handle_dcs_error() except (psycopg.Error, PostgresConnectionException): return 'Error communicating with PostgreSQL. Will try again later' finally: if not dcs_failed: if self.is_leader(): self._failsafe.set_is_active(0) self.touch_member() def _handle_dcs_error(self) -> str: if not self.is_paused() and self.state_handler.is_running(): if self.state_handler.is_primary(): if self.is_failsafe_mode() and self.check_failsafe_topology(): self.set_is_leader(True) self._failsafe.set_is_active(time.time()) self.watchdog.keepalive() return 'continue to run as a leader because failsafe mode is enabled and all members are accessible' self._failsafe.set_is_active(0) msg = 'demoting self because DCS is not accessible and I was a leader' if not self._async_executor.try_run_async(msg, self.demote, ('offline',)): return msg logger.warning('AsyncExecutor is busy, demoting from the main thread') self.demote('offline') return 'demoted self because DCS is not accessible and I was a leader' else: self._sync_replication_slots(True) return 'DCS is not accessible' def _sync_replication_slots(self, dcs_failed: bool) -> List[str]: """Handles replication slots. 
:param dcs_failed: bool, indicates that communication with DCS failed (get_cluster() or update_leader()) :returns: list[str], replication slots names that should be copied from the primary """ slots: List[str] = [] # If dcs_failed we don't want to touch replication slots on a leader or replicas if failsafe_mode isn't enabled. if not self.cluster or dcs_failed and (self.is_leader() or not self.is_failsafe_mode()): return slots # It could be that DCS is read-only, or only the leader can't access it. # Only the second one could be handled by `load_cluster_from_dcs()`. # The first one affects advancing logical replication slots on replicas, therefore we rely on # Failsafe.update_cluster(), that will return "modified" Cluster if failsafe mode is active. cluster = self._failsafe.update_cluster(self.cluster)\ if self.is_failsafe_mode() and not self.is_leader() else self.cluster if cluster: slots = self.state_handler.slots_handler.sync_replication_slots(cluster, self.patroni.nofailover, self.patroni.replicatefrom, self.is_paused()) # Don't copy replication slots if failsafe_mode is active return [] if self.failsafe_is_active() else slots def run_cycle(self) -> str: with self._async_executor: try: info = self._run_cycle() return (self.is_paused() and 'PAUSE: ' or '') + info except PatroniFatalException: raise except Exception: logger.exception('Unexpected exception') return 'Unexpected exception raised, please report it as a BUG' def shutdown(self) -> None: if self.is_paused(): logger.info('Leader key is not deleted and Postgresql is not stopped due paused state') self.watchdog.disable() elif not self._join_aborted: # FIXME: If stop doesn't reach safepoint quickly enough keepalive is triggered. If shutdown checkpoint # takes longer than ttl, then leader key is lost and replication might not have sent out all WAL. # This might not be the desired behavior of users, as a graceful shutdown of the host can mean lost data. # We probably need to something smarter here. 
disable_wd = self.watchdog.disable if self.watchdog.is_running else None status = {'deleted': False} def _on_shutdown(checkpoint_location: int, prev_location: int) -> None: if self.is_leader(): # Postmaster is still running, but pg_control already reports clean "shut down". # It could happen if Postgres is still archiving the backlog of WAL files. # If we know that there are replicas that received the shutdown checkpoint # location, we can remove the leader key and allow them to start leader race. time.sleep(1) # give replicas some more time to catch up if self.is_failover_possible(cluster_lsn=checkpoint_location): self.dcs.delete_leader(self.cluster.leader, prev_location) status['deleted'] = True else: self.dcs.write_leader_optime(prev_location) def _before_shutdown() -> None: self.notify_citus_coordinator('before_demote') on_shutdown = _on_shutdown if self.is_leader() else None before_shutdown = _before_shutdown if self.is_leader() else None self.while_not_sync_standby(lambda: self.state_handler.stop(checkpoint=False, on_safepoint=disable_wd, on_shutdown=on_shutdown, before_shutdown=before_shutdown, stop_timeout=self.primary_stop_timeout())) if not self.state_handler.is_running(): if self.is_leader() and not status['deleted']: checkpoint_location = self.state_handler.latest_checkpoint_location() self.dcs.delete_leader(self.cluster.leader, checkpoint_location) self.touch_member() else: # XXX: what about when Patroni is started as the wrong user that has access to the watchdog device # but cannot shut down PostgreSQL. Root would be the obvious example. Would be nice to not kill the # system due to a bad config. logger.error("PostgreSQL shutdown failed, leader key not removed.%s", (" Leaving watchdog running." 
if self.watchdog.is_running else "")) def watch(self, timeout: float) -> bool: # watch on leader key changes if the postgres is running and leader is known and current node is not lock owner if self._async_executor.busy or not self.cluster or self.cluster.is_unlocked() or self.has_lock(False): leader_version = None else: leader_version = self.cluster.leader.version if self.cluster.leader else None return self.dcs.watch(leader_version, timeout) def wakeup(self) -> None: """Trigger the next run of HA loop if there is no "active" leader watch request in progress. This usually happens on the leader or if the node is running async action""" self.dcs.event.set() def get_remote_member(self, member: Union[Leader, Member, None] = None) -> RemoteMember: """Get remote member node to stream from. In case of standby cluster this will tell us from which remote member to stream. Config can be both patroni config or cluster.config.data. """ data: Dict[str, Any] = {} cluster_params = self.global_config.get_standby_cluster_config() if cluster_params: data.update({k: v for k, v in cluster_params.items() if k in RemoteMember.ALLOWED_KEYS}) data['no_replication_slot'] = 'primary_slot_name' not in cluster_params conn_kwargs = member.conn_kwargs() if member else \ {k: cluster_params[k] for k in ('host', 'port') if k in cluster_params} if conn_kwargs: data['conn_kwargs'] = conn_kwargs name = member.name if member else 'remote_member:{}'.format(uuid.uuid1()) return RemoteMember(name, data) def get_failover_candidates(self, exclude_failover_candidate: bool) -> List[Member]: """Return a list of candidates for either manual or automatic failover. Exclude non-sync members when in synchronous mode, the current node (its checks are always performed earlier) and the candidate if required. If failover candidate exclusion is not requested and a candidate is specified in the /failover key, return the candidate only. 
The result is further evaluated in the caller :func:`Ha.is_failover_possible` to check if any member is actually healthy enough and is allowed to poromote. :param exclude_failover_candidate: if ``True``, exclude :attr:`failover.candidate` from the candidates. :returns: a list of :class:`Member` ojects or an empty list if there is no candidate available. """ failover = self.cluster.failover exclude = [self.state_handler.name] + ([failover.candidate] if failover and exclude_failover_candidate else []) def is_eligible(node: Member) -> bool: # in synchronous mode we allow failover (not switchover!) to async node if self.sync_mode_is_active() and not self.cluster.sync.matches(node.name)\ and not (failover and not failover.leader): return False # Don't spend time on "nofailover" nodes checking. # We also don't need nodes which we can't query with the api in the list. return node.name not in exclude and \ not node.nofailover and bool(node.api_url) and \ (not failover or not failover.candidate or node.name == failover.candidate) return list(filter(is_eligible, self.cluster.members)) patroni-3.2.2/patroni/log.py000066400000000000000000000371321455170150700160140ustar00rootroot00000000000000"""Patroni logging facilities. Daemon processes will use a 2-step logging handler. Whenever a log message is issued it is initially enqueued in-memory and is later asynchronously flushed by a thread to the final destination. """ import logging import os import sys from copy import deepcopy from logging.handlers import RotatingFileHandler from patroni.utils import deep_compare from queue import Queue, Full from threading import Lock, Thread from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING _LOGGER = logging.getLogger(__name__) def debug_exception(self: logging.Logger, msg: object, *args: Any, **kwargs: Any) -> None: """Add full stack trace info to debug log messages and partial to others. Handle :func:`~self.exception` calls for *self*. .. 
    note::

        * If *self* log level is set to ``DEBUG``, then issue a ``DEBUG`` message with the complete stack trace;

        * If *self* log level is ``INFO`` or higher, then issue an ``ERROR`` message with only the last line of the
          stack trace.

    :param self: logger for which :func:`~self.exception` will be processed.
    :param msg: the message related to the exception to be logged.
    :param args: positional arguments to be passed to :func:`~self.debug` or :func:`~self.error`.
    :param kwargs: keyword arguments to be passed to :func:`~self.debug` or :func:`~self.error`.
    """
    # ``exc_info`` is decided below based on the effective level, so drop any caller-supplied value.
    kwargs.pop("exc_info", False)
    if self.isEnabledFor(logging.DEBUG):
        self.debug(msg, *args, exc_info=True, **kwargs)
    else:
        # keep non-debug logs compact: append only the exception message instead of the full traceback
        msg = "{0}, DETAIL: '{1}'".format(msg, sys.exc_info()[1])
        self.error(msg, *args, exc_info=False, **kwargs)


def error_exception(self: logging.Logger, msg: object, *args: Any, **kwargs: Any) -> None:
    """Add full stack trace info to error messages.

    Handle :func:`~self.exception` calls for *self*.

    .. note::
        * By default issue an ``ERROR`` message with the complete stack trace. If you do not want to show the
          complete stack trace, call with ``exc_info=False``.

    :param self: logger for which :func:`~self.exception` will be processed.
    :param msg: the message related to the exception to be logged.
    :param args: positional arguments to be passed to :func:`~self.error`.
    :param kwargs: keyword arguments to be passed to :func:`~self.error`.
    """
    exc_info = kwargs.pop("exc_info", True)
    self.error(msg, *args, exc_info=exc_info, **kwargs)


class QueueHandler(logging.Handler):
    """Queue-based logging handler.

    :ivar queue: queue to hold log messages that are pending to be flushed to the final destination.
    """

    def __init__(self) -> None:
        """Queue initialised and initial records_lost established."""
        super().__init__()
        self.queue: Queue[Union[logging.LogRecord, None]] = Queue()
        # number of records dropped while the queue was full; reported via _try_to_report_lost_records()
        self._records_lost = 0

    def _put_record(self, record: logging.LogRecord) -> None:
        """Asynchronously enqueue a log record.

        :param record: the record to be logged.
        """
        # Render the message eagerly and scrub args/exc_info so the record is self-contained by the time the
        # logger thread flushes it (the caller's arguments/exception context may be gone by then).
        self.format(record)
        record.msg = record.message
        record.args = None
        record.exc_info = None
        self.queue.put_nowait(record)

    def _try_to_report_lost_records(self) -> None:
        """Report the number of log messages that have been lost and reset the counter.

        .. note::
            It will issue a ``WARNING`` message in the logs with the number of lost log messages.
        """
        if self._records_lost:
            try:
                record = _LOGGER.makeRecord(_LOGGER.name, logging.WARNING, __file__, 0,
                                            'QueueHandler has lost %s log records', (self._records_lost,),
                                            None, 'emit')
                self._put_record(record)
                self._records_lost = 0
            except Exception:
                # best effort only: reporting lost records must never break logging itself
                pass

    def emit(self, record: logging.LogRecord) -> None:
        """Handle each log record that is emitted.

        Call :func:`_put_record` to enqueue the emitted log record.

        Also check if we have previously lost any log record, and if so, log a ``WARNING`` message.

        :param record: the record that was emitted.
        """
        try:
            self._put_record(record)
            self._try_to_report_lost_records()
        except Exception:
            # queue is full (or record could not be enqueued) -- count the loss and report it later
            self._records_lost += 1

    @property
    def records_lost(self) -> int:
        """Number of log messages that have been lost while the queue was full."""
        return self._records_lost


class ProxyHandler(logging.Handler):
    """Handle log records in place of pending log handlers.

    .. note::
        This is used to handle log messages while the logger thread has not started yet, in which case the
        queue-based handler is not yet started.

    :ivar patroni_logger: the logger thread.
    """

    def __init__(self, patroni_logger: 'PatroniLogger') -> None:
        """Create a new :class:`ProxyHandler` instance.

        :param patroni_logger: the logger thread.
        """
        super().__init__()
        self.patroni_logger = patroni_logger

    def emit(self, record: logging.LogRecord) -> None:
        """Emit each log record that is handled.

        Will push the log record down to :func:`~logging.Handler.handle` method of the currently configured
        log handler.

        :param record: the record that was emitted.
""" if self.patroni_logger.log_handler is not None: self.patroni_logger.log_handler.handle(record) class PatroniLogger(Thread): """Logging thread for the Patroni daemon process. It is a 2-step logging approach. Any time a log message is issued it is initially enqueued in-memory, and then asynchronously flushed to the final destination by the logging thread. .. seealso:: :class:`QueueHandler`: object used for enqueueing messages in-memory. :cvar DEFAULT_LEVEL: default logging level (``INFO``). :cvar DEFAULT_TRACEBACK_LEVEL: default traceback logging level (``ERROR``). :cvar DEFAULT_FORMAT: default format of log messages (``%(asctime)s %(levelname)s: %(message)s``). :cvar NORMAL_LOG_QUEUE_SIZE: expected number of log messages per HA loop when operating under a normal situation. :cvar DEFAULT_MAX_QUEUE_SIZE: default maximum queue size for holding a backlog of log messages that are pending to be flushed. :cvar LOGGING_BROKEN_EXIT_CODE: exit code to be used if it detects(``5``). :ivar log_handler: log handler that is currently being used by the thread. :ivar log_handler_lock: lock used to modify ``log_handler``. """ DEFAULT_LEVEL = 'INFO' DEFAULT_TRACEBACK_LEVEL = 'ERROR' DEFAULT_FORMAT = '%(asctime)s %(levelname)s: %(message)s' NORMAL_LOG_QUEUE_SIZE = 2 # When everything goes normal Patroni writes only 2 messages per HA loop DEFAULT_MAX_QUEUE_SIZE = 1000 LOGGING_BROKEN_EXIT_CODE = 5 def __init__(self) -> None: """Prepare logging queue and proxy handlers as they become ready during daemon startup. .. note:: While Patroni is starting up it keeps ``DEBUG`` log level, and writes log messages through a proxy handler. Once the logger thread is finally started, it switches from that proxy handler to the queue based logger, and applies the configured log settings. The switching is used to avoid that the logger thread prevents Patroni from shutting down if any issue occurs in the meantime until the thread is properly started. 
""" super(PatroniLogger, self).__init__() self._queue_handler = QueueHandler() self._root_logger = logging.getLogger() self._config: Optional[Dict[str, Any]] = None self.log_handler = None self.log_handler_lock = Lock() self._old_handlers: List[logging.Handler] = [] # initially set log level to ``DEBUG`` while the logger thread has not started running yet. The daemon process # will later adjust all log related settings with what was provided through the user configuration file. self.reload_config({'level': 'DEBUG'}) # We will switch to the QueueHandler only when thread was started. # This is necessary to protect from the cases when Patroni constructor # failed and PatroniLogger thread remain running and prevent shutdown. self._proxy_handler = ProxyHandler(self) self._root_logger.addHandler(self._proxy_handler) def update_loggers(self, config: Dict[str, Any]) -> None: """Configure custom loggers' log levels. .. note:: It creates logger objects that are not defined yet in the log manager. :param config: :class:`dict` object with custom loggers configuration, is set either from: * ``log.loggers`` section of Patroni configuration; or * from the method that is trying to make sure that the node name isn't duplicated (to silence annoying ``urllib3`` WARNING's). :Example: .. code-block:: python update_loggers({'urllib3.connectionpool': 'WARNING'}) """ loggers = deepcopy(config) for name, logger in self._root_logger.manager.loggerDict.items(): # ``Placeholder`` is a node in the log manager for which no logger has been defined. We are interested only # in the ones that were defined if not isinstance(logger, logging.PlaceHolder): # if this logger is present in *config*, use the configured level, otherwise # use ``logging.NOTSET``, which means it will inherit the level # from any parent node up to the root for which log level is defined. 
level = loggers.pop(name, logging.NOTSET) logger.setLevel(level) # define loggers that do not exist yet and set level as configured in the *config* for name, level in loggers.items(): logger = self._root_logger.manager.getLogger(name) logger.setLevel(level) def reload_config(self, config: Dict[str, Any]) -> None: """Apply log related configuration. .. note:: It is also able to deal with runtime configuration changes. :param config: ``log`` section from Patroni configuration. """ if self._config is None or not deep_compare(self._config, config): with self._queue_handler.queue.mutex: self._queue_handler.queue.maxsize = config.get('max_queue_size', self.DEFAULT_MAX_QUEUE_SIZE) self._root_logger.setLevel(config.get('level', PatroniLogger.DEFAULT_LEVEL)) if config.get('traceback_level', PatroniLogger.DEFAULT_TRACEBACK_LEVEL).lower() == 'debug': # show stack traces only if ``log.traceback_level`` is ``DEBUG`` logging.Logger.exception = debug_exception else: # show stack traces as ``ERROR`` log messages logging.Logger.exception = error_exception new_handler = None if 'dir' in config: if not isinstance(self.log_handler, RotatingFileHandler): new_handler = RotatingFileHandler(os.path.join(config['dir'], __name__)) handler = new_handler or self.log_handler if TYPE_CHECKING: # pragma: no cover assert isinstance(handler, RotatingFileHandler) handler.maxBytes = int(config.get('file_size', 25000000)) # pyright: ignore [reportGeneralTypeIssues] handler.backupCount = int(config.get('file_num', 4)) else: if self.log_handler is None or isinstance(self.log_handler, RotatingFileHandler): new_handler = logging.StreamHandler() handler = new_handler or self.log_handler oldlogformat = (self._config or {}).get('format', PatroniLogger.DEFAULT_FORMAT) logformat = config.get('format', PatroniLogger.DEFAULT_FORMAT) olddateformat = (self._config or {}).get('dateformat') or None dateformat = config.get('dateformat') or None # Convert empty string to `None` if (oldlogformat != logformat or 
olddateformat != dateformat or new_handler) and handler: handler.setFormatter(logging.Formatter(logformat, dateformat)) if new_handler: with self.log_handler_lock: if self.log_handler: self._old_handlers.append(self.log_handler) self.log_handler = new_handler self._config = config.copy() self.update_loggers(config.get('loggers') or {}) def _close_old_handlers(self) -> None: """Close old log handlers. .. note:: It is used to remove different handlers that were configured previous to a reload in the configuration, e.g. if we are switching from :class:`~logging.handlers.RotatingFileHandler` to class:`~logging.StreamHandler` and vice-versa. """ while True: with self.log_handler_lock: if not self._old_handlers: break handler = self._old_handlers.pop() try: handler.close() except Exception: _LOGGER.exception('Failed to close the old log handler %s', handler) def run(self) -> None: """Run logger's thread main loop. Keep consuming log queue until requested to quit through ``None`` special log record. """ # switch to QueueHandler only when the thread was started with self.log_handler_lock: self._root_logger.addHandler(self._queue_handler) self._root_logger.removeHandler(self._proxy_handler) prev_record = None while True: self._close_old_handlers() if TYPE_CHECKING: # pragma: no cover assert self.log_handler is not None record = self._queue_handler.queue.get(True) # special message that indicates Patroni is shutting down if record is None: break if self._root_logger.level == logging.INFO: # messages like ``Lock owner: postgresql0; I am postgresql1`` will be shown only when stream doesn't # look normal. This is used to reduce chattiness of Patroni logs. if record.msg.startswith('Lock owner: '): prev_record, record = record, None else: if prev_record and prev_record.thread == record.thread: if not (record.msg.startswith('no action. 
') or record.msg.startswith('PAUSE: no action')): self.log_handler.handle(prev_record) prev_record = None if record: self.log_handler.handle(record) self._queue_handler.queue.task_done() def shutdown(self) -> None: """Shut down the logger thread.""" try: # ``None`` is a special message indicating to queue handler that it should quit its main loop. self._queue_handler.queue.put_nowait(None) except Full: # Queue is full. # It seems that logging is not working, exiting with non-standard exit-code is the best we can do. sys.exit(self.LOGGING_BROKEN_EXIT_CODE) self.join() logging.shutdown() @property def queue_size(self) -> int: """Number of log records in the queue.""" return self._queue_handler.queue.qsize() @property def records_lost(self) -> int: """Number of logging records that have been lost while the queue was full.""" return self._queue_handler.records_lost patroni-3.2.2/patroni/postgresql/000077500000000000000000000000001455170150700170565ustar00rootroot00000000000000patroni-3.2.2/patroni/postgresql/__init__.py000066400000000000000000001732101455170150700211730ustar00rootroot00000000000000import logging import os import re import shlex import shutil import subprocess import time from contextlib import contextmanager from copy import deepcopy from datetime import datetime from dateutil import tz from psutil import TimeoutExpired from threading import current_thread, Lock from typing import Any, Callable, Dict, Iterator, List, Optional, Union, Tuple, TYPE_CHECKING from .bootstrap import Bootstrap from .callback_executor import CallbackAction, CallbackExecutor from .cancellable import CancellableSubprocess from .config import ConfigHandler, mtime from .connection import ConnectionPool, get_connection_cursor from .citus import CitusHandler from .misc import parse_history, parse_lsn, postgres_major_version_to_int from .postmaster import PostmasterProcess from .slots import SlotsHandler from .sync import SyncHandler from .. 
import psycopg from ..async_executor import CriticalTask from ..collections import CaseInsensitiveSet from ..dcs import Cluster, Leader, Member, SLOT_ADVANCE_AVAILABLE_VERSION from ..exceptions import PostgresConnectionException from ..utils import Retry, RetryFailedError, polling_loop, data_directory_is_empty, parse_int if TYPE_CHECKING: # pragma: no cover from psycopg import Connection as Connection3, Cursor from psycopg2 import connection as connection3, cursor from ..config import GlobalConfig logger = logging.getLogger(__name__) STATE_RUNNING = 'running' STATE_REJECT = 'rejecting connections' STATE_NO_RESPONSE = 'not responding' STATE_UNKNOWN = 'unknown' STOP_POLLING_INTERVAL = 1 @contextmanager def null_context(): yield class Postgresql(object): POSTMASTER_START_TIME = "pg_catalog.pg_postmaster_start_time()" TL_LSN = ("CASE WHEN pg_catalog.pg_is_in_recovery() THEN 0 " "ELSE ('x' || pg_catalog.substr(pg_catalog.pg_{0}file_name(" "pg_catalog.pg_current_{0}_{1}()), 1, 8))::bit(32)::int END, " # primary timeline "CASE WHEN pg_catalog.pg_is_in_recovery() THEN 0 ELSE " "pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_current_{0}{2}_{1}(), '0/0')::bigint END, " # wal(_flush)?_lsn "pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_last_{0}_replay_{1}(), '0/0')::bigint, " "pg_catalog.pg_{0}_{1}_diff(COALESCE(pg_catalog.pg_last_{0}_receive_{1}(), '0/0'), '0/0')::bigint, " "pg_catalog.pg_is_in_recovery() AND pg_catalog.pg_is_{0}_replay_paused()") def __init__(self, config: Dict[str, Any]) -> None: self.name: str = config['name'] self.scope: str = config['scope'] self._data_dir: str = config['data_dir'] self._database = config.get('database', 'postgres') self._version_file = os.path.join(self._data_dir, 'PG_VERSION') self._pg_control = os.path.join(self._data_dir, 'global', 'pg_control') self.connection_string: str self.proxy_url: Optional[str] self._major_version = self.get_major_version() self._global_config = None self._state_lock = Lock() self.set_state('stopped') 
self._pending_restart = False self.connection_pool = ConnectionPool() self._connection = self.connection_pool.get('heartbeat') self.citus_handler = CitusHandler(self, config.get('citus')) self.config = ConfigHandler(self, config) self.config.check_directories() self._bin_dir = config.get('bin_dir') or '' self.bootstrap = Bootstrap(self) self.bootstrapping = False self.__thread_ident = current_thread().ident self.slots_handler = SlotsHandler(self) self.sync_handler = SyncHandler(self) self._callback_executor = CallbackExecutor() self.__cb_called = False self.__cb_pending = None self.cancellable = CancellableSubprocess() self._sysid = '' self.retry = Retry(max_tries=-1, deadline=config['retry_timeout'] / 2.0, max_delay=1, retry_exceptions=PostgresConnectionException) # Retry 'pg_is_in_recovery()' only once self._is_leader_retry = Retry(max_tries=1, deadline=config['retry_timeout'] / 2.0, max_delay=1, retry_exceptions=PostgresConnectionException) self._role_lock = Lock() self.set_role(self.get_postgres_role_from_data_directory()) self._state_entry_timestamp = 0 self._cluster_info_state = {} self._has_permanent_slots = True self._enforce_hot_standby_feedback = False self._cached_replica_timeline = None # Last known running process self._postmaster_proc = None self._available_gucs = None if self.is_running(): # If we found postmaster process we need to figure out whether postgres is accepting connections self.set_state('starting') self.check_startup_state_changed() if self.state == 'running': # we are "joining" already running postgres # we know that PostgreSQL is accepting connections and can read some GUC's from pg_settings self.config.load_current_server_parameters() self.set_role('master' if self.is_primary() else 'replica') hba_saved = self.config.replace_pg_hba() ident_saved = self.config.replace_pg_ident() if self.major_version < 120000 or self.role in ('master', 'primary'): # If PostgreSQL is running as a primary or we run PostgreSQL that is older than 12 we can 
# call reload_config() once again (the first call happened in the ConfigHandler constructor), # so that it can figure out if config files should be updated and pg_ctl reload executed. self.config.reload_config(config, sighup=bool(hba_saved or ident_saved)) elif hba_saved or ident_saved: self.reload() elif not self.is_running() and self.role in ('master', 'primary'): self.set_role('demoted') @property def create_replica_methods(self) -> List[str]: return self.config.get('create_replica_methods', []) or self.config.get('create_replica_method', []) or [] @property def major_version(self) -> int: return self._major_version @property def database(self) -> str: return self._database @property def data_dir(self) -> str: return self._data_dir @property def callback(self) -> Dict[str, str]: return self.config.get('callbacks', {}) or {} @property def wal_dir(self) -> str: return os.path.join(self._data_dir, 'pg_' + self.wal_name) @property def wal_name(self) -> str: return 'wal' if self._major_version >= 100000 else 'xlog' @property def wal_flush(self) -> str: """For PostgreSQL 9.6 onwards we want to use pg_current_wal_flush_lsn()/pg_current_xlog_flush_location().""" return '_flush' if self._major_version >= 90600 else '' @property def lsn_name(self) -> str: return 'lsn' if self._major_version >= 100000 else 'location' @property def supports_multiple_sync(self) -> bool: """:returns: `True` if Postgres version supports more than one synchronous node.""" return self._major_version >= 90600 @property def can_advance_slots(self) -> bool: """``True`` if :attr:``major_version`` is greater than 110000.""" return self.major_version >= SLOT_ADVANCE_AVAILABLE_VERSION @property def cluster_info_query(self) -> str: """Returns the monitoring query with a fixed number of fields. The query text is constructed based on current state in DCS and PostgreSQL version: 1. function names depend on version. wal/lsn for v10+ and xlog/location for pre v10. 2. 
for primary we query timeline_id (extracted from pg_walfile_name()) and pg_current_wal_lsn() 3. for replicas we query pg_last_wal_receive_lsn(), pg_last_wal_replay_lsn(), and pg_is_wal_replay_paused() 4. for v9.6+ we query primary_slot_name and primary_conninfo from pg_stat_get_wal_receiver() 5. for v11+ with permanent logical slots we query from pg_replication_slots and aggregate the result 6. for standby_leader node running v9.6+ we also query pg_control_checkpoint to fetch timeline_id 7. if sync replication is enabled we query pg_stat_replication and aggregate the result. In addition to that we get current values of synchronous_commit and synchronous_standby_names GUCs. If some conditions are not satisfied we simply put static values instead. E.g., NULL, 0, '', and so on. """ extra = ", " + (("pg_catalog.current_setting('synchronous_commit'), " "pg_catalog.current_setting('synchronous_standby_names'), " "(SELECT pg_catalog.json_agg(r.*) FROM (SELECT w.pid as pid, application_name, sync_state," " pg_catalog.pg_{0}_{1}_diff(write_{1}, '0/0')::bigint AS write_lsn," " pg_catalog.pg_{0}_{1}_diff(flush_{1}, '0/0')::bigint AS flush_lsn," " pg_catalog.pg_{0}_{1}_diff(replay_{1}, '0/0')::bigint AS replay_lsn " "FROM pg_catalog.pg_stat_get_wal_senders() w," " pg_catalog.pg_stat_get_activity(w.pid)" " WHERE w.state = 'streaming') r)").format(self.wal_name, self.lsn_name) if (not self.global_config or self.global_config.is_synchronous_mode) and self.role in ('master', 'primary', 'promoted') else "'on', '', NULL") if self._major_version >= 90600: extra = ("pg_catalog.current_setting('restore_command')" if self._major_version >= 120000 else "NULL") +\ ", " + ("(SELECT pg_catalog.json_agg(s.*) FROM (SELECT slot_name, slot_type as type, datoid::bigint, " "plugin, catalog_xmin, pg_catalog.pg_wal_lsn_diff(confirmed_flush_lsn, '0/0')::bigint" " AS confirmed_flush_lsn, pg_catalog.pg_wal_lsn_diff(restart_lsn, '0/0')::bigint" " AS restart_lsn FROM 
pg_catalog.pg_get_replication_slots()) AS s)" if self._has_permanent_slots and self.can_advance_slots else "NULL") + extra extra = (", CASE WHEN latest_end_lsn IS NULL THEN NULL ELSE received_tli END," " slot_name, conninfo, status, {0} FROM pg_catalog.pg_stat_get_wal_receiver()").format(extra) if self.role == 'standby_leader': extra = "timeline_id" + extra + ", pg_catalog.pg_control_checkpoint()" else: extra = "0" + extra else: extra = "0, NULL, NULL, NULL, NULL, NULL, NULL" + extra return ("SELECT " + self.TL_LSN + ", {3}").format(self.wal_name, self.lsn_name, self.wal_flush, extra) @property def available_gucs(self) -> CaseInsensitiveSet: """GUCs available in this Postgres server.""" if not self._available_gucs: self._available_gucs = self._get_gucs() return self._available_gucs def _version_file_exists(self) -> bool: return not self.data_directory_empty() and os.path.isfile(self._version_file) def get_major_version(self) -> int: """Reads major version from PG_VERSION file :returns: major PostgreSQL version in integer format or 0 in case of missing file or errors""" if self._version_file_exists(): try: with open(self._version_file) as f: return postgres_major_version_to_int(f.read().strip()) except Exception: logger.exception('Failed to read PG_VERSION from %s', self._data_dir) return 0 def pgcommand(self, cmd: str) -> str: """Return path to the specified PostgreSQL command. .. note:: If ``postgresql.bin_name.*cmd*`` was configured by the user then that binary name is used, otherwise the default binary name *cmd* is used. :param cmd: the Postgres binary name to get path to. :returns: path to Postgres binary named *cmd*. 
""" return os.path.join(self._bin_dir, (self.config.get('bin_name', {}) or {}).get(cmd, cmd)) def pg_ctl(self, cmd: str, *args: str, **kwargs: Any) -> bool: """Builds and executes pg_ctl command :returns: `!True` when return_code == 0, otherwise `!False`""" pg_ctl = [self.pgcommand('pg_ctl'), cmd] return subprocess.call(pg_ctl + ['-D', self._data_dir] + list(args), **kwargs) == 0 def initdb(self, *args: str, **kwargs: Any) -> bool: """Builds and executes the initdb command. :param args: List of arguments to be joined into the initdb command. :param kwargs: Keyword arguments to pass to ``subprocess.call``. :returns: ``True`` if the result of ``subprocess.call`, the exit code, is ``0``. """ initdb = [self.pgcommand('initdb')] + list(args) + [self.data_dir] return subprocess.call(initdb, **kwargs) == 0 def pg_isready(self) -> str: """Runs pg_isready to see if PostgreSQL is accepting connections. :returns: 'ok' if PostgreSQL is up, 'reject' if starting up, 'no_resopnse' if not up.""" r = self.connection_pool.conn_kwargs cmd = [self.pgcommand('pg_isready'), '-p', r['port'], '-d', self._database] # Host is not set if we are connecting via default unix socket if 'host' in r: cmd.extend(['-h', r['host']]) # We only need the username because pg_isready does not try to authenticate if 'user' in r: cmd.extend(['-U', r['user']]) ret = subprocess.call(cmd) return_codes = {0: STATE_RUNNING, 1: STATE_REJECT, 2: STATE_NO_RESPONSE, 3: STATE_UNKNOWN} return return_codes.get(ret, STATE_UNKNOWN) def reload_config(self, config: Dict[str, Any], sighup: bool = False) -> None: self.config.reload_config(config, sighup) self._is_leader_retry.deadline = self.retry.deadline = config['retry_timeout'] / 2.0 @property def pending_restart(self) -> bool: return self._pending_restart def set_pending_restart(self, value: bool) -> None: self._pending_restart = value @property def sysid(self) -> str: if not self._sysid and not self.bootstrapping: data = self.controldata() self._sysid = 
data.get('Database system identifier', '') return self._sysid def get_postgres_role_from_data_directory(self) -> str: if self.data_directory_empty() or not self.controldata(): return 'uninitialized' elif self.config.recovery_conf_exists(): return 'replica' else: return 'master' @property def server_version(self) -> int: return self._connection.server_version def connection(self) -> Union['connection3', 'Connection3[Any]']: return self._connection.get() def _query(self, sql: str, *params: Any) -> List[Tuple[Any, ...]]: """Execute *sql* query with *params* and optionally return results. :param sql: SQL statement to execute. :param params: parameters to pass. :returns: a query response as a list of tuples if there is any. :raises: :exc:`~psycopg.Error` if had issues while executing *sql*. :exc:`~patroni.exceptions.PostgresConnectionException`: if had issues while connecting to the database. :exc:`~patroni.utils.RetryFailedError`: if it was detected that connection/query failed due to PostgreSQL restart. """ try: return self._connection.query(sql, *params) except PostgresConnectionException as exc: if self.state == 'restarting': raise RetryFailedError('cluster is being restarted') from exc raise def query(self, sql: str, *params: Any, retry: bool = True) -> List[Tuple[Any, ...]]: """Execute *sql* query with *params* and optionally return results. :param sql: SQL statement to execute. :param params: parameters to pass. :param retry: whether the query should be retried upon failure or given up immediately. :returns: a query response as a list of tuples if there is any. :raises: :exc:`~psycopg.Error` if had issues while executing *sql*. :exc:`~patroni.exceptions.PostgresConnectionException`: if had issues while connecting to the database. :exc:`~patroni.utils.RetryFailedError`: if it was detected that connection/query failed due to PostgreSQL restart or if retry deadline was exceeded. 
""" if not retry: return self._query(sql, *params) try: return self.retry(self._query, sql, *params) except RetryFailedError as exc: raise PostgresConnectionException(str(exc)) from exc def pg_control_exists(self) -> bool: return os.path.isfile(self._pg_control) def data_directory_empty(self) -> bool: if self.pg_control_exists(): return False return data_directory_is_empty(self._data_dir) def replica_method_options(self, method: str) -> Dict[str, Any]: return deepcopy(self.config.get(method, {}) or {}) def replica_method_can_work_without_replication_connection(self, method: str) -> bool: return method != 'basebackup' and bool(self.replica_method_options(method).get('no_master') or self.replica_method_options(method).get('no_leader')) def can_create_replica_without_replication_connection(self, replica_methods: Optional[List[str]]) -> bool: """ go through the replication methods to see if there are ones that does not require a working replication connection. """ if replica_methods is None: replica_methods = self.create_replica_methods return any(self.replica_method_can_work_without_replication_connection(m) for m in replica_methods) @property def enforce_hot_standby_feedback(self) -> bool: return self._enforce_hot_standby_feedback def set_enforce_hot_standby_feedback(self, value: bool) -> None: # If we enable or disable the hot_standby_feedback we need to update postgresql.conf and reload if self._enforce_hot_standby_feedback != value: self._enforce_hot_standby_feedback = value if self.is_running(): self.config.write_postgresql_conf() self.reload() @property def global_config(self) -> Optional['GlobalConfig']: return self._global_config def reset_cluster_info_state(self, cluster: Union[Cluster, None], nofailover: bool = False, global_config: Optional['GlobalConfig'] = None) -> None: """Reset monitoring query cache. It happens in the beginning of heart-beat loop and on change of `synchronous_standby_names`. 
        :param cluster: currently known cluster state from DCS
        :param nofailover: whether this node could become a new primary.
                           Important when there are logical permanent replication slots because "nofailover"
                           node could do cascading replication and should enable `hot_standby_feedback`
        :param global_config: last known :class:`GlobalConfig` object
        """
        # invalidate the cached result of the monitoring query; repopulated lazily by _cluster_info_state_get()
        self._cluster_info_state = {}

        if global_config:
            self._global_config = global_config
        if not self._global_config:
            # we can't make any decision about hot_standby_feedback/permanent slots without a known global config
            return

        if self._global_config.is_standby_cluster:
            # Standby cluster can't have logical replication slots, and we don't need to enforce hot_standby_feedback
            self.set_enforce_hot_standby_feedback(False)

        if cluster and cluster.config and cluster.config.modify_version:
            # We want to enable hot_standby_feedback if the replica is supposed
            # to have a logical slot or in case if it is the cascading replica.
            self.set_enforce_hot_standby_feedback(not self._global_config.is_standby_cluster
                                                  and self.can_advance_slots
                                                  and cluster.should_enforce_hot_standby_feedback(self.name,
                                                                                                  nofailover))
            self._has_permanent_slots = cluster.has_permanent_slots(
                my_name=self.name,
                is_standby_cluster=self._global_config.is_standby_cluster,
                nofailover=nofailover,
                major_version=self.major_version)

    def _cluster_info_state_get(self, name: str) -> Optional[Any]:
        # Lazily (re)populate the per-HA-loop cache with the result of the monitoring query.
        if not self._cluster_info_state:
            try:
                result = self._is_leader_retry(self._query, self.cluster_info_query)[0]
                # field order must match the column order produced by :attr:`cluster_info_query`
                cluster_info_state = dict(zip(['timeline', 'wal_position', 'replayed_location',
                                               'received_location', 'replay_paused', 'pg_control_timeline',
                                               'received_tli', 'slot_name', 'conninfo', 'receiver_state',
                                               'restore_command', 'slots', 'synchronous_commit',
                                               'synchronous_standby_names', 'pg_stat_replication'], result))
                if self._has_permanent_slots and self.can_advance_slots:
                    cluster_info_state['slots'] =\
                        self.slots_handler.process_permanent_slots(cluster_info_state['slots'])
                self._cluster_info_state = cluster_info_state
            except RetryFailedError as e:  # SELECT failed two times
self._cluster_info_state = {'error': str(e)} if not self.is_starting() and self.pg_isready() == STATE_REJECT: self.set_state('starting') if 'error' in self._cluster_info_state: raise PostgresConnectionException(self._cluster_info_state['error']) return self._cluster_info_state.get(name) def replayed_location(self) -> Optional[int]: return self._cluster_info_state_get('replayed_location') def received_location(self) -> Optional[int]: return self._cluster_info_state_get('received_location') def slots(self) -> Dict[str, int]: return self._cluster_info_state_get('slots') or {} def primary_slot_name(self) -> Optional[str]: return self._cluster_info_state_get('slot_name') def primary_conninfo(self) -> Optional[str]: return self._cluster_info_state_get('conninfo') def received_timeline(self) -> Optional[int]: return self._cluster_info_state_get('received_tli') def synchronous_commit(self) -> str: """:returns: "synchronous_commit" GUC value.""" return self._cluster_info_state_get('synchronous_commit') or 'on' def synchronous_standby_names(self) -> str: """:returns: "synchronous_standby_names" GUC value.""" return self._cluster_info_state_get('synchronous_standby_names') or '' def pg_stat_replication(self) -> List[Dict[str, Any]]: """:returns: a result set of 'SELECT * FROM pg_stat_replication'.""" return self._cluster_info_state_get('pg_stat_replication') or [] def replication_state_from_parameters(self, is_primary: bool, receiver_state: Optional[str], restore_command: Optional[str]) -> Optional[str]: """Figure out the replication state from input parameters. .. note:: This method could be only called when Postgres is up, running and queries are successfuly executed. 
        :param is_primary: ``True`` if postgres is not running in recovery
        :param receiver_state: value from `pg_stat_get_wal_receiver.state` or None if Postgres is older than 9.6
        :param restore_command: value of ``restore_command`` GUC for PostgreSQL 12+ or
                                `postgresql.recovery_conf.restore_command` if it is set in Patroni configuration

        :returns:
            - `None` for the primary and for Postgres older than 9.6;
            - 'streaming' if replica is streaming according to the `pg_stat_wal_receiver` view;
            - 'in archive recovery' if replica isn't streaming and there is a `restore_command`
        """
        if self._major_version >= 90600 and not is_primary:
            if receiver_state == 'streaming':
                return 'streaming'
            # For Postgres older than 12 we get `restore_command` from Patroni config, otherwise we check GUC.
            # NOTE: precedence here is intentional: (major < 12 and config value) or (GUC value) -- the query
            # returns NULL for restore_command on Postgres older than 12.
            if self._major_version < 120000 and self.config.restore_command() or restore_command:
                return 'in archive recovery'

    def replication_state(self) -> Optional[str]:
        """Checks replication state from `pg_stat_get_wal_receiver()`.

        .. note::
            Available only since 9.6

        :returns: ``streaming``, ``in archive recovery``, or ``None``
        """
        return self.replication_state_from_parameters(self.is_primary(),
                                                      self._cluster_info_state_get('receiver_state'),
                                                      self._cluster_info_state_get('restore_command'))

    def is_primary(self) -> bool:
        # ``timeline`` is non-zero only when pg_is_in_recovery() is false (see TL_LSN query)
        try:
            return bool(self._cluster_info_state_get('timeline'))
        except PostgresConnectionException:
            logger.warning('Failed to determine PostgreSQL state from the connection, falling back to cached role')
            return bool(self.is_running() and self.role in ('master', 'primary'))

    def replay_paused(self) -> bool:
        # last column of the monitoring query: pg_is_in_recovery() AND pg_is_{wal|xlog}_replay_paused()
        return self._cluster_info_state_get('replay_paused') or False

    def resume_wal_replay(self) -> None:
        self._query('SELECT pg_catalog.pg_{0}_replay_resume()'.format(self.wal_name))

    def handle_parameter_change(self) -> None:
        # Postgres 14+ pauses WAL replay (instead of shutting down) when a hot-standby-relevant parameter
        # is decreased below the primary's value, so we resume replay explicitly after reconfiguration.
        if self.major_version >= 140000 and not self.is_starting() and self.replay_paused():
            logger.info('Resuming paused WAL replay for PostgreSQL 14+')
            self.resume_wal_replay()

    def pg_control_timeline(self) ->
Optional[int]: try: return int(self.controldata().get("Latest checkpoint's TimeLineID", "")) except (TypeError, ValueError): logger.exception('Failed to parse timeline from pg_controldata output') def parse_wal_record(self, timeline: str, lsn: str) -> Union[Tuple[str, str, str, str], Tuple[None, None, None, None]]: out, err = self.waldump(timeline, lsn, 1) if out and not err: match = re.match(r'^rmgr:\s+(.+?)\s+len \(rec/tot\):\s+\d+/\s+\d+, tx:\s+\d+, ' r'lsn: ([0-9A-Fa-f]+/[0-9A-Fa-f]+), prev ([0-9A-Fa-f]+/[0-9A-Fa-f]+), ' r'.*?desc: (.+)', out.decode('utf-8')) if match: return match.group(1), match.group(2), match.group(3), match.group(4) return None, None, None, None def _checkpoint_locations_from_controldata(self, data: Dict[str, str]) -> Optional[Tuple[int, int]]: """Get shutdown checkpoint location. :param data: :class:`dict` object with values returned by `pg_controldata` tool. :returns: a tuple of checkpoint LSN for the cleanly shut down primary, and LSN of prev wal record (SWITCH) if we know that the checkpoint was written to the new WAL file due to the archive_mode=on. """ timeline = data.get("Latest checkpoint's TimeLineID") lsn = checkpoint_lsn = data.get('Latest checkpoint location') prev_lsn = None if data.get('Database cluster state') == 'shut down' and lsn and timeline and checkpoint_lsn: try: checkpoint_lsn = parse_lsn(checkpoint_lsn) rm_name, lsn, prev, desc = self.parse_wal_record(timeline, lsn) desc = str(desc).strip().lower() if rm_name == 'XLOG' and lsn and parse_lsn(lsn) == checkpoint_lsn and prev and\ desc.startswith('checkpoint') and desc.endswith('shutdown'): _, lsn, _, desc = self.parse_wal_record(timeline, prev) prev = parse_lsn(prev) # If the cluster is shutdown with archive_mode=on, WAL is switched before writing the checkpoint. # In this case we want to take the LSN of previous record (SWITCH) as the last known WAL location. 
if lsn and parse_lsn(lsn) == prev and str(desc).strip() in ('xlog switch', 'SWITCH'): prev_lsn = prev except Exception as e: logger.error('Exception when parsing WAL pg_%sdump output: %r', self.wal_name, e) if isinstance(checkpoint_lsn, int): return checkpoint_lsn, (prev_lsn or checkpoint_lsn) def latest_checkpoint_location(self) -> Optional[int]: """Get shutdown checkpoint location. .. note:: In case if checkpoint was written to the new WAL file due to the archive_mode=on we return LSN of the previous wal record (SWITCH). :returns: checkpoint LSN for the cleanly shut down primary. """ checkpoint_locations = self._checkpoint_locations_from_controldata(self.controldata()) if checkpoint_locations: return checkpoint_locations[1] def is_running(self) -> Optional[PostmasterProcess]: """Returns PostmasterProcess if one is running on the data directory or None. If most recently seen process is running updates the cached process based on pid file.""" if self._postmaster_proc: if self._postmaster_proc.is_running(): return self._postmaster_proc self._postmaster_proc = None # we noticed that postgres was restarted, force syncing of replication slots and check of logical slots self.slots_handler.schedule() self._postmaster_proc = PostmasterProcess.from_pidfile(self._data_dir) return self._postmaster_proc @property def cb_called(self) -> bool: return self.__cb_called def call_nowait(self, cb_type: CallbackAction) -> None: """pick a callback command and call it without waiting for it to finish """ if self.bootstrapping: return if cb_type in (CallbackAction.ON_START, CallbackAction.ON_STOP, CallbackAction.ON_RESTART, CallbackAction.ON_ROLE_CHANGE): self.__cb_called = True if self.callback and cb_type in self.callback: cmd = self.callback[cb_type] role = 'master' if self.role == 'promoted' else self.role try: cmd = shlex.split(self.callback[cb_type]) + [cb_type, role, self.scope] self._callback_executor.call(cmd) except Exception: logger.exception('callback %s %r %s %s failed', 
cmd, cb_type, role, self.scope) @property def role(self) -> str: with self._role_lock: return self._role def set_role(self, value: str) -> None: with self._role_lock: self._role = value @property def state(self) -> str: with self._state_lock: return self._state def set_state(self, value: str) -> None: with self._state_lock: self._state = value self._state_entry_timestamp = time.time() def time_in_state(self) -> float: return time.time() - self._state_entry_timestamp def is_starting(self) -> bool: return self.state == 'starting' def wait_for_port_open(self, postmaster: PostmasterProcess, timeout: float) -> bool: """Waits until PostgreSQL opens ports.""" for _ in polling_loop(timeout): if self.cancellable.is_cancelled: return False if not postmaster.is_running(): logger.error('postmaster is not running') self.set_state('start failed') return False isready = self.pg_isready() if isready != STATE_NO_RESPONSE: if isready not in [STATE_REJECT, STATE_RUNNING]: logger.warning("Can't determine PostgreSQL startup status, assuming running") return True logger.warning("Timed out waiting for PostgreSQL to start") return False def start(self, timeout: Optional[float] = None, task: Optional[CriticalTask] = None, block_callbacks: bool = False, role: Optional[str] = None, after_start: Optional[Callable[..., Any]] = None) -> Optional[bool]: """Start PostgreSQL Waits for postmaster to open ports or terminate so pg_isready can be used to check startup completion or failure. :returns: True if start was initiated and postmaster ports are open, False if start failed, and None if postgres is still starting up""" # make sure we close all connections established against # the former node, otherwise, we might get a stalled one # after kill -9, which would report incorrect data to # patroni. 
self.connection_pool.close() if self.is_running(): logger.error('Cannot start PostgreSQL because one is already running.') self.set_state('starting') return True if not block_callbacks: self.__cb_pending = CallbackAction.ON_START self.set_role(role or self.get_postgres_role_from_data_directory()) self.set_state('starting') self._pending_restart = False try: if not self.ensure_major_version_is_known(): return None configuration = self.config.effective_configuration except Exception: return None self.config.check_directories() self.config.write_postgresql_conf(configuration) self.config.resolve_connection_addresses() self.config.replace_pg_hba() self.config.replace_pg_ident() options = ['--{0}={1}'.format(p, configuration[p]) for p in self.config.CMDLINE_OPTIONS if p in configuration and p not in ('wal_keep_segments', 'wal_keep_size')] if self.cancellable.is_cancelled: return False with task or null_context(): if task and task.is_cancelled: logger.info("PostgreSQL start cancelled.") return False self._postmaster_proc = PostmasterProcess.start(self.pgcommand('postgres'), self._data_dir, self.config.postgresql_conf, options) if task: task.complete(self._postmaster_proc) start_timeout = timeout if not start_timeout: try: start_timeout = float(self.config.get('pg_ctl_timeout', 60) or 0) except ValueError: start_timeout = 60 # We want postmaster to open ports before we continue if not self._postmaster_proc or not self.wait_for_port_open(self._postmaster_proc, start_timeout): return False ret = self.wait_for_startup(start_timeout) if ret is not None: if ret and after_start: after_start() return ret elif timeout is not None: return False else: return None def checkpoint(self, connect_kwargs: Optional[Dict[str, Any]] = None, timeout: Optional[float] = None) -> Optional[str]: check_not_is_in_recovery = connect_kwargs is not None connect_kwargs = connect_kwargs or self.connection_pool.conn_kwargs for p in ['connect_timeout', 'options']: connect_kwargs.pop(p, None) if timeout: 
connect_kwargs['connect_timeout'] = timeout try: with get_connection_cursor(**connect_kwargs) as cur: cur.execute("SET statement_timeout = 0") if check_not_is_in_recovery: cur.execute('SELECT pg_catalog.pg_is_in_recovery()') row = cur.fetchone() if not row or row[0]: return 'is_in_recovery=true' cur.execute('CHECKPOINT') except psycopg.Error: logger.exception('Exception during CHECKPOINT') return 'not accessible or not healty' def stop(self, mode: str = 'fast', block_callbacks: bool = False, checkpoint: Optional[bool] = None, on_safepoint: Optional[Callable[..., Any]] = None, on_shutdown: Optional[Callable[[int, int], Any]] = None, before_shutdown: Optional[Callable[..., Any]] = None, stop_timeout: Optional[int] = None) -> bool: """Stop PostgreSQL Supports a callback when a safepoint is reached. A safepoint is when no user backend can return a successful commit to users. Currently this means we wait for user backends to close. But in the future alternate mechanisms could be added. :param on_safepoint: This callback is called when no user backends are running. 
:param on_shutdown: is called when pg_controldata starts reporting `Database cluster state: shut down` :param before_shutdown: is called after running optional CHECKPOINT and before running pg_ctl stop """ if checkpoint is None: checkpoint = False if mode == 'immediate' else True success, pg_signaled = self._do_stop(mode, block_callbacks, checkpoint, on_safepoint, on_shutdown, before_shutdown, stop_timeout) if success: # block_callbacks is used during restart to avoid # running start/stop callbacks in addition to restart ones if not block_callbacks: self.set_state('stopped') if pg_signaled: self.call_nowait(CallbackAction.ON_STOP) else: logger.warning('pg_ctl stop failed') self.set_state('stop failed') return success def _do_stop(self, mode: str, block_callbacks: bool, checkpoint: bool, on_safepoint: Optional[Callable[..., Any]], on_shutdown: Optional[Callable[[int, int], Any]], before_shutdown: Optional[Callable[..., Any]], stop_timeout: Optional[int]) -> Tuple[bool, bool]: postmaster = self.is_running() if not postmaster: if on_safepoint: on_safepoint() return True, False if checkpoint and not self.is_starting(): self.checkpoint(timeout=stop_timeout) if not block_callbacks: self.set_state('stopping') # invoke user-directed before stop script self._before_stop() if before_shutdown: before_shutdown() # Send signal to postmaster to stop success = postmaster.signal_stop(mode, self.pgcommand('pg_ctl')) if success is not None: if success and on_safepoint: on_safepoint() return success, True # We can skip safepoint detection if we don't have a callback if on_safepoint: # Wait for our connection to terminate so we can be sure that no new connections are being initiated self._wait_for_connection_close(postmaster) postmaster.wait_for_user_backends_to_close(stop_timeout) on_safepoint() if on_shutdown and mode in ('fast', 'smart'): i = 0 # Wait for pg_controldata `Database cluster state:` to change to "shut down" while postmaster.is_running(): data = self.controldata() if 
data.get('Database cluster state', '') == 'shut down': checkpoint_locations = self._checkpoint_locations_from_controldata(data) if checkpoint_locations: on_shutdown(*checkpoint_locations) break elif data.get('Database cluster state', '').startswith('shut down'): # shut down in recovery break elif stop_timeout and i >= stop_timeout: stop_timeout = 0 break time.sleep(STOP_POLLING_INTERVAL) i += STOP_POLLING_INTERVAL try: postmaster.wait(timeout=stop_timeout) except TimeoutExpired: logger.warning("Timeout during postmaster stop, aborting Postgres.") if not self.terminate_postmaster(postmaster, mode, stop_timeout): postmaster.wait() return True, True def terminate_postmaster(self, postmaster: PostmasterProcess, mode: str, stop_timeout: Optional[int]) -> Optional[bool]: if mode in ['fast', 'smart']: try: success = postmaster.signal_stop('immediate', self.pgcommand('pg_ctl')) if success: return True postmaster.wait(timeout=stop_timeout) return True except TimeoutExpired: pass logger.warning("Sending SIGKILL to Postmaster and its children") return postmaster.signal_kill() def terminate_starting_postmaster(self, postmaster: PostmasterProcess) -> None: """Terminates a postmaster that has not yet opened ports or possibly even written a pid file. Blocks until the process goes away.""" postmaster.signal_stop('immediate', self.pgcommand('pg_ctl')) postmaster.wait() def _wait_for_connection_close(self, postmaster: PostmasterProcess) -> None: try: while postmaster.is_running(): # Need a timeout here? 
self._connection.query("SELECT 1") time.sleep(STOP_POLLING_INTERVAL) except (psycopg.Error, PostgresConnectionException): pass def reload(self, block_callbacks: bool = False) -> bool: ret = self.pg_ctl('reload') if ret and not block_callbacks: self.call_nowait(CallbackAction.ON_RELOAD) return ret def check_for_startup(self) -> bool: """Checks PostgreSQL status and returns if PostgreSQL is in the middle of startup.""" return self.is_starting() and not self.check_startup_state_changed() def check_startup_state_changed(self) -> bool: """Checks if PostgreSQL has completed starting up or failed or still starting. Should only be called when state == 'starting' :returns: True if state was changed from 'starting' """ ready = self.pg_isready() if ready == STATE_REJECT: return False elif ready == STATE_NO_RESPONSE: ret = not self.is_running() if ret: self.set_state('start failed') self.slots_handler.schedule(False) # TODO: can remove this? self.config.save_configuration_files(True) # TODO: maybe remove this? return ret else: if ready != STATE_RUNNING: # Bad configuration or unexpected OS error. No idea of PostgreSQL status. # Let the main loop of run cycle clean up the mess. logger.warning("%s status returned from pg_isready", "Unknown" if ready == STATE_UNKNOWN else "Invalid") self.set_state('running') self.slots_handler.schedule() self.config.save_configuration_files(True) # TODO: __cb_pending can be None here after PostgreSQL restarts on its own. Do we want to call the callback? # Previously we didn't even notice. action = self.__cb_pending or CallbackAction.ON_START self.call_nowait(action) self.__cb_pending = None return True def wait_for_startup(self, timeout: float = 0) -> Optional[bool]: """Waits for PostgreSQL startup to complete or fail. 
:returns: True if start was successful, False otherwise""" if not self.is_starting(): # Should not happen logger.warning("wait_for_startup() called when not in starting state") while not self.check_startup_state_changed(): if self.cancellable.is_cancelled or timeout and self.time_in_state() > timeout: return None time.sleep(1) return self.state == 'running' def restart(self, timeout: Optional[float] = None, task: Optional[CriticalTask] = None, block_callbacks: bool = False, role: Optional[str] = None, before_shutdown: Optional[Callable[..., Any]] = None, after_start: Optional[Callable[..., Any]] = None) -> Optional[bool]: """Restarts PostgreSQL. When timeout parameter is set the call will block either until PostgreSQL has started, failed to start or timeout arrives. :returns: True when restart was successful and timeout did not expire when waiting. """ self.set_state('restarting') if not block_callbacks: self.__cb_pending = CallbackAction.ON_RESTART ret = self.stop(block_callbacks=True, before_shutdown=before_shutdown)\ and self.start(timeout, task, True, role, after_start) if not ret and not self.is_starting(): self.set_state('restart failed ({0})'.format(self.state)) return ret def is_healthy(self) -> bool: if not self.is_running(): logger.warning('Postgresql is not running.') return False return True def get_guc_value(self, name: str) -> Optional[str]: cmd = [self.pgcommand('postgres'), '-D', self._data_dir, '-C', name, '--config-file={}'.format(self.config.postgresql_conf)] try: data = subprocess.check_output(cmd) if data: return data.decode('utf-8').strip() except Exception as e: logger.error('Failed to execute %s: %r', cmd, e) def controldata(self) -> Dict[str, str]: """ return the contents of pg_controldata, or non-True value if pg_controldata call failed """ # Don't try to call pg_controldata during backup restore if self._version_file_exists() and self.state != 'creating replica': try: env = {**os.environ, 'LANG': 'C', 'LC_ALL': 'C'} data = 
subprocess.check_output([self.pgcommand('pg_controldata'), self._data_dir], env=env) if data: data = filter(lambda e: ':' in e, data.decode('utf-8').splitlines()) # pg_controldata output depends on major version. Some of parameters are prefixed by 'Current ' return {k.replace('Current ', '', 1): v.strip() for k, v in map(lambda e: e.split(':', 1), data)} except subprocess.CalledProcessError: logger.exception("Error when calling pg_controldata") return {} def waldump(self, timeline: Union[int, str], lsn: str, limit: int) -> Tuple[Optional[bytes], Optional[bytes]]: cmd = self.pgcommand('pg_{0}dump'.format(self.wal_name)) env = {**os.environ, 'LANG': 'C', 'LC_ALL': 'C', 'PGDATA': self._data_dir} try: waldump = subprocess.Popen([cmd, '-t', str(timeline), '-s', lsn, '-n', str(limit)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) out, err = waldump.communicate() waldump.wait() return out, err except Exception as e: logger.error('Failed to execute `%s -t %s -s %s -n %s`: %r', cmd, timeline, lsn, limit, e) return None, None @contextmanager def get_replication_connection_cursor(self, host: Optional[str] = None, port: Union[int, str] = 5432, **kwargs: Any) -> Iterator[Union['cursor', 'Cursor[Any]']]: conn_kwargs = self.config.replication.copy() conn_kwargs.update(host=host, port=int(port) if port else None, user=conn_kwargs.pop('username'), connect_timeout=3, replication=1, options='-c statement_timeout=2000') with get_connection_cursor(**conn_kwargs) as cur: yield cur def get_replica_timeline(self) -> Optional[int]: try: with self.get_replication_connection_cursor(**self.config.local_replication_address) as cur: cur.execute('IDENTIFY_SYSTEM') row = cur.fetchone() return row[1] if row else None except Exception: logger.exception('Can not fetch local timeline and lsn from replication connection') def replica_cached_timeline(self, primary_timeline: Optional[int]) -> Optional[int]: if not self._cached_replica_timeline or not primary_timeline\ or 
self._cached_replica_timeline != primary_timeline: self._cached_replica_timeline = self.get_replica_timeline() return self._cached_replica_timeline def get_primary_timeline(self) -> int: """:returns: current timeline if postgres is running as a primary or 0.""" return self._cluster_info_state_get('timeline') or 0 def get_history(self, timeline: int) -> List[Union[Tuple[int, int, str], Tuple[int, int, str, str, str]]]: history_path = os.path.join(self.wal_dir, '{0:08X}.history'.format(timeline)) history_mtime = mtime(history_path) history: List[Union[Tuple[int, int, str], Tuple[int, int, str, str, str]]] = [] if history_mtime: try: with open(history_path, 'r') as f: history_content = f.read() history = list(parse_history(history_content)) if history[-1][0] == timeline - 1: history_mtime = datetime.fromtimestamp(history_mtime).replace(tzinfo=tz.tzlocal()) history[-1] = history[-1][:3] + (history_mtime.isoformat(), self.name) except Exception: logger.exception('Failed to read and parse %s', (history_path,)) return history def follow(self, member: Union[Leader, Member, None], role: str = 'replica', timeout: Optional[float] = None, do_reload: bool = False) -> Optional[bool]: """Reconfigure postgres to follow a new member or use different recovery parameters. Method may call `on_role_change` callback if role is changing. 
:param member: The member to follow :param role: The desired role, normally 'replica', but could also be a 'standby_leader' :param timeout: start timeout, how long should the `start()` method wait for postgres accepting connections :param do_reload: indicates that after updating postgresql.conf we just need to do a reload instead of restart :returns: True - if restart/reload were successfully performed, False - if restart/reload failed None - if nothing was done or if Postgres is still in starting state after `timeout` seconds.""" if not self.ensure_major_version_is_known(): return None recovery_params = self.config.build_recovery_params(member) self.config.write_recovery_conf(recovery_params) # When we demoting the primary or standby_leader to replica or promoting replica to a standby_leader # and we know for sure that postgres was already running before, we will only execute on_role_change # callback and prevent execution of on_restart/on_start callback. # If the role remains the same (replica or standby_leader), we will execute on_start or on_restart change_role = self.cb_called and (self.role in ('master', 'primary', 'demoted') or not {'standby_leader', 'replica'} - {self.role, role}) if change_role: self.__cb_pending = CallbackAction.NOOP ret = True if self.is_running(): if do_reload: self.config.write_postgresql_conf() ret = self.reload(block_callbacks=change_role) if ret and change_role: self.set_role(role) else: ret = self.restart(block_callbacks=change_role, role=role) else: ret = self.start(timeout=timeout, block_callbacks=change_role, role=role) or None if change_role: # TODO: postpone this until start completes, or maybe do even earlier self.call_nowait(CallbackAction.ON_ROLE_CHANGE) return ret def _wait_promote(self, wait_seconds: int) -> Optional[bool]: for _ in polling_loop(wait_seconds): data = self.controldata() if data.get('Database cluster state') == 'in production': self.set_role('master') return True def _pre_promote(self) -> bool: """ Runs a 
fencing script after the leader lock is acquired but before the replica is promoted. If the script exits with a non-zero code, promotion does not happen and the leader key is removed from DCS. """ cmd = self.config.get('pre_promote') if not cmd: return True ret = self.cancellable.call(shlex.split(cmd)) if ret is not None: logger.info('pre_promote script `%s` exited with %s', cmd, ret) return ret == 0 def _before_stop(self) -> None: """Synchronously run a script prior to stopping postgres.""" cmd = self.config.get('before_stop') if cmd: self._do_before_stop(cmd) def _do_before_stop(self, cmd: str) -> None: try: ret = self.cancellable.call(shlex.split(cmd)) if ret is not None: logger.info('before_stop script `%s` exited with %s', cmd, ret) except Exception as e: logger.error('Exception when calling `%s`: %r', cmd, e) def promote(self, wait_seconds: int, task: CriticalTask, before_promote: Optional[Callable[..., Any]] = None) -> Optional[bool]: if self.role in ('promoted', 'master', 'primary'): return True ret = self._pre_promote() with task: if task.is_cancelled: return False task.complete(ret) if ret is False: return False if self.cancellable.is_cancelled: logger.info("PostgreSQL promote cancelled.") return False if before_promote is not None: before_promote() self.slots_handler.on_promote() self.citus_handler.schedule_cache_rebuild() ret = self.pg_ctl('promote', '-W') if ret: self.set_role('promoted') self.call_nowait(CallbackAction.ON_ROLE_CHANGE) ret = self._wait_promote(wait_seconds) return ret @staticmethod def _wal_position(is_primary: bool, wal_position: int, received_location: Optional[int], replayed_location: Optional[int]) -> int: return wal_position if is_primary else max(received_location or 0, replayed_location or 0) def timeline_wal_position(self) -> Tuple[int, int, Optional[int]]: # This method could be called from different threads (simultaneously with some other `_query` calls). 
# If it is called not from main thread we will create a new cursor to execute statement. if current_thread().ident == self.__thread_ident: timeline = self._cluster_info_state_get('timeline') or 0 wal_position = self._cluster_info_state_get('wal_position') or 0 replayed_location = self.replayed_location() received_location = self.received_location() pg_control_timeline = self._cluster_info_state_get('pg_control_timeline') else: timeline, wal_position, replayed_location, received_location, _, pg_control_timeline = \ self._query(self.cluster_info_query)[0][:6] wal_position = self._wal_position(bool(timeline), wal_position, received_location, replayed_location) return timeline, wal_position, pg_control_timeline def postmaster_start_time(self) -> Optional[str]: try: sql = "SELECT " + self.POSTMASTER_START_TIME return self.query(sql, retry=current_thread().ident == self.__thread_ident)[0][0].isoformat(sep=' ') except psycopg.Error: return None def last_operation(self) -> int: return self._wal_position(self.is_primary(), self._cluster_info_state_get('wal_position') or 0, self.received_location(), self.replayed_location()) def configure_server_parameters(self) -> None: self._major_version = self.get_major_version() self.config.setup_server_parameters() def ensure_major_version_is_known(self) -> bool: """Calls configure_server_parameters() if `_major_version` is not known :returns: `True` if `_major_version` is set, otherwise `False`""" if not self._major_version: self.configure_server_parameters() return self._major_version > 0 def pg_wal_realpath(self) -> Dict[str, str]: """Returns a dict containing the symlink (key) and target (value) for the wal directory""" links: Dict[str, str] = {} for pg_wal_dir in ('pg_xlog', 'pg_wal'): pg_wal_path = os.path.join(self._data_dir, pg_wal_dir) if os.path.exists(pg_wal_path) and os.path.islink(pg_wal_path): pg_wal_realpath = os.path.realpath(pg_wal_path) links[pg_wal_path] = pg_wal_realpath return links def pg_tblspc_realpaths(self) -> 
Dict[str, str]: """Returns a dict containing the symlink (key) and target (values) for the tablespaces""" links: Dict[str, str] = {} pg_tblsp_dir = os.path.join(self._data_dir, 'pg_tblspc') if os.path.exists(pg_tblsp_dir): for tsdn in os.listdir(pg_tblsp_dir): pg_tsp_path = os.path.join(pg_tblsp_dir, tsdn) if parse_int(tsdn) and os.path.islink(pg_tsp_path): pg_tsp_rpath = os.path.realpath(pg_tsp_path) links[pg_tsp_path] = pg_tsp_rpath return links def move_data_directory(self) -> None: if os.path.isdir(self._data_dir) and not self.is_running(): try: postfix = 'failed' # let's see if the wal directory is a symlink, in this case we # should move the target for (source, pg_wal_realpath) in self.pg_wal_realpath().items(): logger.info('renaming WAL directory and updating symlink: %s', pg_wal_realpath) new_name = '{0}.{1}'.format(pg_wal_realpath, postfix) if os.path.exists(new_name): shutil.rmtree(new_name) os.rename(pg_wal_realpath, new_name) os.unlink(source) os.symlink(new_name, source) # Move user defined tablespace directory for (source, pg_tsp_rpath) in self.pg_tblspc_realpaths().items(): logger.info('renaming user defined tablespace directory and updating symlink: %s', pg_tsp_rpath) new_name = '{0}.{1}'.format(pg_tsp_rpath, postfix) if os.path.exists(new_name): shutil.rmtree(new_name) os.rename(pg_tsp_rpath, new_name) os.unlink(source) os.symlink(new_name, source) new_name = '{0}.{1}'.format(self._data_dir, postfix) logger.info('renaming data directory to %s', new_name) if os.path.exists(new_name): shutil.rmtree(new_name) os.rename(self._data_dir, new_name) except OSError: logger.exception("Could not rename data directory %s", self._data_dir) def remove_data_directory(self) -> None: self.set_role('uninitialized') logger.info('Removing data directory: %s', self._data_dir) try: if os.path.islink(self._data_dir): os.unlink(self._data_dir) elif not os.path.exists(self._data_dir): return elif os.path.isfile(self._data_dir): os.remove(self._data_dir) elif 
os.path.isdir(self._data_dir): # let's see if wal directory is a symlink, in this case we # should clean the target for pg_wal_realpath in self.pg_wal_realpath().values(): logger.info('Removing WAL directory: %s', pg_wal_realpath) shutil.rmtree(pg_wal_realpath) # Remove user defined tablespace directories for pg_tsp_rpath in self.pg_tblspc_realpaths().values(): logger.info('Removing user defined tablespace directory: %s', pg_tsp_rpath) shutil.rmtree(pg_tsp_rpath, ignore_errors=True) shutil.rmtree(self._data_dir) except (IOError, OSError): logger.exception('Could not remove data directory %s', self._data_dir) self.move_data_directory() def schedule_sanity_checks_after_pause(self) -> None: """ After coming out of pause we have to: 1. configure server parameters if necessary 2. sync replication slots, because it might happen that slots were removed 3. get new 'Database system identifier' to make sure that it wasn't changed """ self.ensure_major_version_is_known() self.slots_handler.schedule() self.citus_handler.schedule_cache_rebuild() self._sysid = '' def _get_gucs(self) -> CaseInsensitiveSet: """Get all available GUCs based on ``postgres --describe-config`` output. :returns: all available GUCs in the local Postgres server. 
""" cmd = [self.pgcommand('postgres'), '--describe-config'] return CaseInsensitiveSet({ line.split('\t')[0] for line in subprocess.check_output(cmd).decode('utf-8').strip().split('\n') }) patroni-3.2.2/patroni/postgresql/available_parameters/000077500000000000000000000000001455170150700232215ustar00rootroot00000000000000patroni-3.2.2/patroni/postgresql/available_parameters/0_postgres.yml000066400000000000000000001046061455170150700260400ustar00rootroot00000000000000parameters: allow_in_place_tablespaces: - type: Bool version_from: 150000 allow_system_table_mods: - type: Bool version_from: 90300 application_name: - type: String version_from: 90300 archive_command: - type: String version_from: 90300 archive_library: - type: String version_from: 150000 archive_mode: - type: Bool version_from: 90300 version_till: 90500 - type: EnumBool version_from: 90500 possible_values: - always archive_timeout: - type: Integer version_from: 90300 min_val: 0 max_val: 1073741823 unit: s array_nulls: - type: Bool version_from: 90300 authentication_timeout: - type: Integer version_from: 90300 min_val: 1 max_val: 600 unit: s autovacuum: - type: Bool version_from: 90300 autovacuum_analyze_scale_factor: - type: Real version_from: 90300 min_val: 0 max_val: 100 autovacuum_analyze_threshold: - type: Integer version_from: 90300 min_val: 0 max_val: 2147483647 autovacuum_freeze_max_age: - type: Integer version_from: 90300 min_val: 100000 max_val: 2000000000 autovacuum_max_workers: - type: Integer version_from: 90300 version_till: 90600 min_val: 1 max_val: 8388607 - type: Integer version_from: 90600 min_val: 1 max_val: 262143 autovacuum_multixact_freeze_max_age: - type: Integer version_from: 90300 min_val: 10000 max_val: 2000000000 autovacuum_naptime: - type: Integer version_from: 90300 min_val: 1 max_val: 2147483 unit: s autovacuum_vacuum_cost_delay: - type: Integer version_from: 90300 version_till: 120000 min_val: -1 max_val: 100 unit: ms - type: Real version_from: 120000 min_val: -1 max_val: 
100 unit: ms autovacuum_vacuum_cost_limit: - type: Integer version_from: 90300 min_val: -1 max_val: 10000 autovacuum_vacuum_insert_scale_factor: - type: Real version_from: 130000 min_val: 0 max_val: 100 autovacuum_vacuum_insert_threshold: - type: Integer version_from: 130000 min_val: -1 max_val: 2147483647 autovacuum_vacuum_scale_factor: - type: Real version_from: 90300 min_val: 0 max_val: 100 autovacuum_vacuum_threshold: - type: Integer version_from: 90300 min_val: 0 max_val: 2147483647 autovacuum_work_mem: - type: Integer version_from: 90400 min_val: -1 max_val: 2147483647 unit: kB backend_flush_after: - type: Integer version_from: 90600 min_val: 0 max_val: 256 unit: 8kB backslash_quote: - type: EnumBool version_from: 90300 possible_values: - safe_encoding backtrace_functions: - type: String version_from: 130000 bgwriter_delay: - type: Integer version_from: 90300 min_val: 10 max_val: 10000 unit: ms bgwriter_flush_after: - type: Integer version_from: 90600 min_val: 0 max_val: 256 unit: 8kB bgwriter_lru_maxpages: - type: Integer version_from: 90300 version_till: 100000 min_val: 0 max_val: 1000 - type: Integer version_from: 100000 min_val: 0 max_val: 1073741823 bgwriter_lru_multiplier: - type: Real version_from: 90300 min_val: 0 max_val: 10 bonjour: - type: Bool version_from: 90300 bonjour_name: - type: String version_from: 90300 bytea_output: - type: Enum version_from: 90300 possible_values: - escape - hex check_function_bodies: - type: Bool version_from: 90300 checkpoint_completion_target: - type: Real version_from: 90300 min_val: 0 max_val: 1 checkpoint_flush_after: - type: Integer version_from: 90600 min_val: 0 max_val: 256 unit: 8kB checkpoint_segments: - type: Integer version_from: 90300 version_till: 90500 min_val: 1 max_val: 2147483647 checkpoint_timeout: - type: Integer version_from: 90300 version_till: 90600 min_val: 30 max_val: 3600 unit: s - type: Integer version_from: 90600 min_val: 30 max_val: 86400 unit: s checkpoint_warning: - type: Integer 
version_from: 90300 min_val: 0 max_val: 2147483647 unit: s client_connection_check_interval: - type: Integer version_from: 140000 min_val: 0 max_val: 2147483647 unit: ms client_encoding: - type: String version_from: 90300 client_min_messages: - type: Enum version_from: 90300 possible_values: - debug5 - debug4 - debug3 - debug2 - debug1 - log - notice - warning - error cluster_name: - type: String version_from: 90500 commit_delay: - type: Integer version_from: 90300 min_val: 0 max_val: 100000 commit_siblings: - type: Integer version_from: 90300 min_val: 0 max_val: 1000 compute_query_id: - type: EnumBool version_from: 140000 version_till: 150000 possible_values: - auto - type: EnumBool version_from: 150000 possible_values: - auto - regress config_file: - type: String version_from: 90300 constraint_exclusion: - type: EnumBool version_from: 90300 possible_values: - partition cpu_index_tuple_cost: - type: Real version_from: 90300 min_val: 0 max_val: 1.79769e+308 cpu_operator_cost: - type: Real version_from: 90300 min_val: 0 max_val: 1.79769e+308 cpu_tuple_cost: - type: Real version_from: 90300 min_val: 0 max_val: 1.79769e+308 createrole_self_grant: - type: String version_from: 160000 cursor_tuple_fraction: - type: Real version_from: 90300 min_val: 0 max_val: 1 data_directory: - type: String version_from: 90300 data_sync_retry: - type: Bool version_from: 90400 DateStyle: - type: String version_from: 90300 db_user_namespace: - type: Bool version_from: 90300 deadlock_timeout: - type: Integer version_from: 90300 min_val: 1 max_val: 2147483647 unit: ms debug_discard_caches: - type: Integer version_from: 150000 min_val: 0 max_val: 0 debug_io_direct: - type: String version_from: 160000 debug_parallel_query: - type: EnumBool version_from: 160000 possible_values: - regress debug_pretty_print: - type: Bool version_from: 90300 debug_print_parse: - type: Bool version_from: 90300 debug_print_plan: - type: Bool version_from: 90300 debug_print_rewritten: - type: Bool version_from: 
90300 default_statistics_target: - type: Integer version_from: 90300 min_val: 1 max_val: 10000 default_table_access_method: - type: String version_from: 120000 default_tablespace: - type: String version_from: 90300 default_text_search_config: - type: String version_from: 90300 default_toast_compression: - type: Enum version_from: 140000 possible_values: - pglz - lz4 default_transaction_deferrable: - type: Bool version_from: 90300 default_transaction_isolation: - type: Enum version_from: 90300 possible_values: - serializable - repeatable read - read committed - read uncommitted default_transaction_read_only: - type: Bool version_from: 90300 default_with_oids: - type: Bool version_from: 90300 version_till: 120000 dynamic_library_path: - type: String version_from: 90300 dynamic_shared_memory_type: - type: Enum version_from: 90400 version_till: 120000 possible_values: - posix - sysv - mmap - none - type: Enum version_from: 120000 possible_values: - posix - sysv - mmap effective_cache_size: - type: Integer version_from: 90300 min_val: 1 max_val: 2147483647 unit: 8kB effective_io_concurrency: - type: Integer version_from: 90300 min_val: 0 max_val: 1000 enable_async_append: - type: Bool version_from: 140000 enable_bitmapscan: - type: Bool version_from: 90300 enable_gathermerge: - type: Bool version_from: 100000 enable_hashagg: - type: Bool version_from: 90300 enable_hashjoin: - type: Bool version_from: 90300 enable_incremental_sort: - type: Bool version_from: 130000 enable_indexonlyscan: - type: Bool version_from: 90300 enable_indexscan: - type: Bool version_from: 90300 enable_material: - type: Bool version_from: 90300 enable_memoize: - type: Bool version_from: 150000 enable_mergejoin: - type: Bool version_from: 90300 enable_nestloop: - type: Bool version_from: 90300 enable_parallel_append: - type: Bool version_from: 110000 enable_parallel_hash: - type: Bool version_from: 110000 enable_partition_pruning: - type: Bool version_from: 110000 enable_partitionwise_aggregate: - 
type: Bool version_from: 110000 enable_partitionwise_join: - type: Bool version_from: 110000 enable_presorted_aggregate: - type: Bool version_from: 160000 enable_seqscan: - type: Bool version_from: 90300 enable_sort: - type: Bool version_from: 90300 enable_tidscan: - type: Bool version_from: 90300 escape_string_warning: - type: Bool version_from: 90300 event_source: - type: String version_from: 90300 exit_on_error: - type: Bool version_from: 90300 extension_destdir: - type: String version_from: 140000 external_pid_file: - type: String version_from: 90300 extra_float_digits: - type: Integer version_from: 90300 min_val: -15 max_val: 3 force_parallel_mode: - type: EnumBool version_from: 90600 version_till: 160000 possible_values: - regress from_collapse_limit: - type: Integer version_from: 90300 min_val: 1 max_val: 2147483647 fsync: - type: Bool version_from: 90300 full_page_writes: - type: Bool version_from: 90300 geqo: - type: Bool version_from: 90300 geqo_effort: - type: Integer version_from: 90300 min_val: 1 max_val: 10 geqo_generations: - type: Integer version_from: 90300 min_val: 0 max_val: 2147483647 geqo_pool_size: - type: Integer version_from: 90300 min_val: 0 max_val: 2147483647 geqo_seed: - type: Real version_from: 90300 min_val: 0 max_val: 1 geqo_selection_bias: - type: Real version_from: 90300 min_val: 1.5 max_val: 2 geqo_threshold: - type: Integer version_from: 90300 min_val: 2 max_val: 2147483647 gin_fuzzy_search_limit: - type: Integer version_from: 90300 min_val: 0 max_val: 2147483647 gin_pending_list_limit: - type: Integer version_from: 90500 min_val: 64 max_val: 2147483647 unit: kB gss_accept_delegation: - type: Bool version_from: 160000 hash_mem_multiplier: - type: Real version_from: 130000 min_val: 1 max_val: 1000 hba_file: - type: String version_from: 90300 hot_standby: - type: Bool version_from: 90300 hot_standby_feedback: - type: Bool version_from: 90300 huge_pages: - type: EnumBool version_from: 90400 possible_values: - try huge_page_size: - 
type: Integer version_from: 140000 min_val: 0 max_val: 2147483647 unit: kB icu_validation_level: - type: Enum version_from: 160000 possible_values: - disabled - debug5 - debug4 - debug3 - debug2 - debug1 - log - notice - warning - error ident_file: - type: String version_from: 90300 idle_in_transaction_session_timeout: - type: Integer version_from: 90600 min_val: 0 max_val: 2147483647 unit: ms idle_session_timeout: - type: Integer version_from: 140000 min_val: 0 max_val: 2147483647 unit: ms ignore_checksum_failure: - type: Bool version_from: 90300 ignore_invalid_pages: - type: Bool version_from: 130000 ignore_system_indexes: - type: Bool version_from: 90300 IntervalStyle: - type: Enum version_from: 90300 possible_values: - postgres - postgres_verbose - sql_standard - iso_8601 jit: - type: Bool version_from: 110000 jit_above_cost: - type: Real version_from: 110000 min_val: -1 max_val: 1.79769e+308 jit_debugging_support: - type: Bool version_from: 110000 jit_dump_bitcode: - type: Bool version_from: 110000 jit_expressions: - type: Bool version_from: 110000 jit_inline_above_cost: - type: Real version_from: 110000 min_val: -1 max_val: 1.79769e+308 jit_optimize_above_cost: - type: Real version_from: 110000 min_val: -1 max_val: 1.79769e+308 jit_profiling_support: - type: Bool version_from: 110000 jit_provider: - type: String version_from: 110000 jit_tuple_deforming: - type: Bool version_from: 110000 join_collapse_limit: - type: Integer version_from: 90300 min_val: 1 max_val: 2147483647 krb_caseins_users: - type: Bool version_from: 90300 krb_server_keyfile: - type: String version_from: 90300 krb_srvname: - type: String version_from: 90300 version_till: 90400 lc_messages: - type: String version_from: 90300 lc_monetary: - type: String version_from: 90300 lc_numeric: - type: String version_from: 90300 lc_time: - type: String version_from: 90300 listen_addresses: - type: String version_from: 90300 local_preload_libraries: - type: String version_from: 90300 lock_timeout: - 
type: Integer version_from: 90300 min_val: 0 max_val: 2147483647 unit: ms lo_compat_privileges: - type: Bool version_from: 90300 log_autovacuum_min_duration: - type: Integer version_from: 90300 min_val: -1 max_val: 2147483647 unit: ms log_checkpoints: - type: Bool version_from: 90300 log_connections: - type: Bool version_from: 90300 log_destination: - type: String version_from: 90300 log_directory: - type: String version_from: 90300 log_disconnections: - type: Bool version_from: 90300 log_duration: - type: Bool version_from: 90300 log_error_verbosity: - type: Enum version_from: 90300 possible_values: - terse - default - verbose log_executor_stats: - type: Bool version_from: 90300 log_file_mode: - type: Integer version_from: 90300 min_val: 0 max_val: 511 log_filename: - type: String version_from: 90300 logging_collector: - type: Bool version_from: 90300 log_hostname: - type: Bool version_from: 90300 logical_decoding_work_mem: - type: Integer version_from: 130000 min_val: 64 max_val: 2147483647 unit: kB log_line_prefix: - type: String version_from: 90300 log_lock_waits: - type: Bool version_from: 90300 log_min_duration_sample: - type: Integer version_from: 130000 min_val: -1 max_val: 2147483647 unit: ms log_min_duration_statement: - type: Integer version_from: 90300 min_val: -1 max_val: 2147483647 unit: ms log_min_error_statement: - type: Enum version_from: 90300 possible_values: - debug5 - debug4 - debug3 - debug2 - debug1 - info - notice - warning - error - log - fatal - panic log_min_messages: - type: Enum version_from: 90300 possible_values: - debug5 - debug4 - debug3 - debug2 - debug1 - info - notice - warning - error - log - fatal - panic log_parameter_max_length: - type: Integer version_from: 130000 min_val: -1 max_val: 1073741823 unit: B log_parameter_max_length_on_error: - type: Integer version_from: 130000 min_val: -1 max_val: 1073741823 unit: B log_parser_stats: - type: Bool version_from: 90300 log_planner_stats: - type: Bool version_from: 90300 
log_recovery_conflict_waits: - type: Bool version_from: 140000 log_replication_commands: - type: Bool version_from: 90500 log_rotation_age: - type: Integer version_from: 90300 min_val: 0 max_val: 35791394 unit: min log_rotation_size: - type: Integer version_from: 90300 min_val: 0 max_val: 2097151 unit: kB log_startup_progress_interval: - type: Integer version_from: 150000 min_val: 0 max_val: 2147483647 unit: ms log_statement: - type: Enum version_from: 90300 possible_values: - none - ddl - mod - all log_statement_sample_rate: - type: Real version_from: 130000 min_val: 0 max_val: 1 log_statement_stats: - type: Bool version_from: 90300 log_temp_files: - type: Integer version_from: 90300 min_val: -1 max_val: 2147483647 unit: kB log_timezone: - type: String version_from: 90300 log_transaction_sample_rate: - type: Real version_from: 120000 min_val: 0 max_val: 1 log_truncate_on_rotation: - type: Bool version_from: 90300 logical_replication_mode: - type: Enum version_from: 160000 possible_values: - buffered - immediate maintenance_io_concurrency: - type: Integer version_from: 130000 min_val: 0 max_val: 1000 maintenance_work_mem: - type: Integer version_from: 90300 min_val: 1024 max_val: 2147483647 unit: kB max_connections: - type: Integer version_from: 90300 version_till: 90600 min_val: 1 max_val: 8388607 - type: Integer version_from: 90600 min_val: 1 max_val: 262143 max_files_per_process: - type: Integer version_from: 90300 version_till: 130000 min_val: 25 max_val: 2147483647 - type: Integer version_from: 130000 min_val: 64 max_val: 2147483647 max_locks_per_transaction: - type: Integer version_from: 90300 min_val: 10 max_val: 2147483647 max_logical_replication_workers: - type: Integer version_from: 100000 min_val: 0 max_val: 262143 max_parallel_apply_workers_per_subscription: - type: Integer version_from: 160000 min_val: 0 max_val: 1024 max_parallel_maintenance_workers: - type: Integer version_from: 110000 min_val: 0 max_val: 1024 max_parallel_workers: - type: Integer 
version_from: 100000 min_val: 0 max_val: 1024 max_parallel_workers_per_gather: - type: Integer version_from: 90600 min_val: 0 max_val: 1024 max_pred_locks_per_page: - type: Integer version_from: 100000 min_val: 0 max_val: 2147483647 max_pred_locks_per_relation: - type: Integer version_from: 100000 min_val: -2147483648 max_val: 2147483647 max_pred_locks_per_transaction: - type: Integer version_from: 90300 min_val: 10 max_val: 2147483647 max_prepared_transactions: - type: Integer version_from: 90300 version_till: 90600 min_val: 0 max_val: 8388607 - type: Integer version_from: 90600 min_val: 0 max_val: 262143 max_replication_slots: - type: Integer version_from: 90400 version_till: 90600 min_val: 0 max_val: 8388607 - type: Integer version_from: 90600 min_val: 0 max_val: 262143 max_slot_wal_keep_size: - type: Integer version_from: 130000 min_val: -1 max_val: 2147483647 unit: MB max_stack_depth: - type: Integer version_from: 90300 min_val: 100 max_val: 2147483647 unit: kB max_standby_archive_delay: - type: Integer version_from: 90300 min_val: -1 max_val: 2147483647 unit: ms max_standby_streaming_delay: - type: Integer version_from: 90300 min_val: -1 max_val: 2147483647 unit: ms max_sync_workers_per_subscription: - type: Integer version_from: 100000 min_val: 0 max_val: 262143 max_wal_senders: - type: Integer version_from: 90300 version_till: 90600 min_val: 0 max_val: 8388607 - type: Integer version_from: 90600 min_val: 0 max_val: 262143 max_wal_size: - type: Integer version_from: 90500 version_till: 100000 min_val: 2 max_val: 2147483647 unit: 16MB - type: Integer version_from: 100000 min_val: 2 max_val: 2147483647 unit: MB max_worker_processes: - type: Integer version_from: 90400 version_till: 90600 min_val: 1 max_val: 8388607 - type: Integer version_from: 90600 min_val: 0 max_val: 262143 min_dynamic_shared_memory: - type: Integer version_from: 140000 min_val: 0 max_val: 2147483647 unit: MB min_parallel_index_scan_size: - type: Integer version_from: 100000 min_val: 0 
max_val: 715827882 unit: 8kB min_parallel_relation_size: - type: Integer version_from: 90600 version_till: 100000 min_val: 0 max_val: 715827882 unit: 8kB min_parallel_table_scan_size: - type: Integer version_from: 100000 min_val: 0 max_val: 715827882 unit: 8kB min_wal_size: - type: Integer version_from: 90500 version_till: 100000 min_val: 2 max_val: 2147483647 unit: 16MB - type: Integer version_from: 100000 min_val: 2 max_val: 2147483647 unit: MB old_snapshot_threshold: - type: Integer version_from: 90600 min_val: -1 max_val: 86400 unit: min operator_precedence_warning: - type: Bool version_from: 90500 version_till: 140000 parallel_leader_participation: - type: Bool version_from: 110000 parallel_setup_cost: - type: Real version_from: 90600 min_val: 0 max_val: 1.79769e+308 parallel_tuple_cost: - type: Real version_from: 90600 min_val: 0 max_val: 1.79769e+308 password_encryption: - type: Bool version_from: 90300 version_till: 100000 - type: Enum version_from: 100000 possible_values: - md5 - scram-sha-256 plan_cache_mode: - type: Enum version_from: 120000 possible_values: - auto - force_generic_plan - force_custom_plan port: - type: Integer version_from: 90300 min_val: 1 max_val: 65535 post_auth_delay: - type: Integer version_from: 90300 min_val: 0 max_val: 2147 unit: s pre_auth_delay: - type: Integer version_from: 90300 min_val: 0 max_val: 60 unit: s quote_all_identifiers: - type: Bool version_from: 90300 random_page_cost: - type: Real version_from: 90300 min_val: 0 max_val: 1.79769e+308 recovery_init_sync_method: - type: Enum version_from: 140000 possible_values: - fsync - syncfs recovery_prefetch: - type: EnumBool version_from: 150000 possible_values: - try recursive_worktable_factor: - type: Real version_from: 150000 min_val: 0.001 max_val: 1000000.0 remove_temp_files_after_crash: - type: Bool version_from: 140000 replacement_sort_tuples: - type: Integer version_from: 90600 version_till: 110000 min_val: 0 max_val: 2147483647 reserved_connections: - type: Integer 
version_from: 160000 min_val: 0 max_val: 262143 restart_after_crash: - type: Bool version_from: 90300 row_security: - type: Bool version_from: 90500 scram_iterations: - type: Integer version_from: 160000 min_val: 1 max_val: 2147483647 search_path: - type: String version_from: 90300 send_abort_for_crash: - type: Bool version_from: 160000 send_abort_for_kill: - type: Bool version_from: 160000 seq_page_cost: - type: Real version_from: 90300 min_val: 0 max_val: 1.79769e+308 session_preload_libraries: - type: String version_from: 90400 session_replication_role: - type: Enum version_from: 90300 possible_values: - origin - replica - local shared_buffers: - type: Integer version_from: 90300 min_val: 16 max_val: 1073741823 unit: 8kB shared_memory_type: - type: Enum version_from: 120000 possible_values: - sysv - mmap shared_preload_libraries: - type: String version_from: 90300 sql_inheritance: - type: Bool version_from: 90300 version_till: 100000 ssl: - type: Bool version_from: 90300 ssl_ca_file: - type: String version_from: 90300 ssl_cert_file: - type: String version_from: 90300 ssl_ciphers: - type: String version_from: 90300 ssl_crl_dir: - type: String version_from: 140000 ssl_crl_file: - type: String version_from: 90300 ssl_dh_params_file: - type: String version_from: 100000 ssl_ecdh_curve: - type: String version_from: 90400 ssl_key_file: - type: String version_from: 90300 ssl_max_protocol_version: - type: Enum version_from: 120000 possible_values: - '' - tlsv1 - tlsv1.1 - tlsv1.2 - tlsv1.3 ssl_min_protocol_version: - type: Enum version_from: 120000 possible_values: - tlsv1 - tlsv1.1 - tlsv1.2 - tlsv1.3 ssl_passphrase_command: - type: String version_from: 110000 ssl_passphrase_command_supports_reload: - type: Bool version_from: 110000 ssl_prefer_server_ciphers: - type: Bool version_from: 90400 ssl_renegotiation_limit: - type: Integer version_from: 90300 version_till: 90500 min_val: 0 max_val: 2147483647 unit: kB standard_conforming_strings: - type: Bool version_from: 
90300 statement_timeout: - type: Integer version_from: 90300 min_val: 0 max_val: 2147483647 unit: ms stats_fetch_consistency: - type: Enum version_from: 150000 possible_values: - none - cache - snapshot stats_temp_directory: - type: String version_from: 90300 version_till: 150000 superuser_reserved_connections: - type: Integer version_from: 90300 version_till: 90600 min_val: 0 max_val: 8388607 - type: Integer version_from: 90600 min_val: 0 max_val: 262143 synchronize_seqscans: - type: Bool version_from: 90300 synchronous_commit: - type: EnumBool version_from: 90300 version_till: 90600 possible_values: - local - remote_write - type: EnumBool version_from: 90600 possible_values: - local - remote_write - remote_apply synchronous_standby_names: - type: String version_from: 90300 syslog_facility: - type: Enum version_from: 90300 possible_values: - local0 - local1 - local2 - local3 - local4 - local5 - local6 - local7 syslog_ident: - type: String version_from: 90300 syslog_sequence_numbers: - type: Bool version_from: 90600 syslog_split_messages: - type: Bool version_from: 90600 tcp_keepalives_count: - type: Integer version_from: 90300 min_val: 0 max_val: 2147483647 tcp_keepalives_idle: - type: Integer version_from: 90300 min_val: 0 max_val: 2147483647 unit: s tcp_keepalives_interval: - type: Integer version_from: 90300 min_val: 0 max_val: 2147483647 unit: s tcp_user_timeout: - type: Integer version_from: 120000 min_val: 0 max_val: 2147483647 unit: ms temp_buffers: - type: Integer version_from: 90300 min_val: 100 max_val: 1073741823 unit: 8kB temp_file_limit: - type: Integer version_from: 90300 min_val: -1 max_val: 2147483647 unit: kB temp_tablespaces: - type: String version_from: 90300 TimeZone: - type: String version_from: 90300 timezone_abbreviations: - type: String version_from: 90300 trace_notify: - type: Bool version_from: 90300 trace_recovery_messages: - type: Enum version_from: 90300 possible_values: - debug5 - debug4 - debug3 - debug2 - debug1 - log - notice - 
warning - error trace_sort: - type: Bool version_from: 90300 track_activities: - type: Bool version_from: 90300 track_activity_query_size: - type: Integer version_from: 90300 version_till: 110000 min_val: 100 max_val: 102400 - type: Integer version_from: 110000 version_till: 130000 min_val: 100 max_val: 102400 unit: B - type: Integer version_from: 130000 min_val: 100 max_val: 1048576 unit: B track_commit_timestamp: - type: Bool version_from: 90500 track_counts: - type: Bool version_from: 90300 track_functions: - type: Enum version_from: 90300 possible_values: - none - pl - all track_io_timing: - type: Bool version_from: 90300 track_wal_io_timing: - type: Bool version_from: 140000 transaction_deferrable: - type: Bool version_from: 90300 transaction_isolation: - type: Enum version_from: 90300 possible_values: - serializable - repeatable read - read committed - read uncommitted transaction_read_only: - type: Bool version_from: 90300 transform_null_equals: - type: Bool version_from: 90300 unix_socket_directories: - type: String version_from: 90300 unix_socket_group: - type: String version_from: 90300 unix_socket_permissions: - type: Integer version_from: 90300 min_val: 0 max_val: 511 update_process_title: - type: Bool version_from: 90300 vacuum_buffer_usage_limit: - type: Integer version_from: 160000 min_val: 0 max_val: 16777216 unit: kB vacuum_cleanup_index_scale_factor: - type: Real version_from: 110000 version_till: 140000 min_val: 0 max_val: 10000000000.0 vacuum_cost_delay: - type: Integer version_from: 90300 version_till: 120000 min_val: 0 max_val: 100 unit: ms - type: Real version_from: 120000 min_val: 0 max_val: 100 unit: ms vacuum_cost_limit: - type: Integer version_from: 90300 min_val: 1 max_val: 10000 vacuum_cost_page_dirty: - type: Integer version_from: 90300 min_val: 0 max_val: 10000 vacuum_cost_page_hit: - type: Integer version_from: 90300 min_val: 0 max_val: 10000 vacuum_cost_page_miss: - type: Integer version_from: 90300 min_val: 0 max_val: 10000 
vacuum_defer_cleanup_age: - type: Integer version_from: 90300 version_till: 160000 min_val: 0 max_val: 1000000 vacuum_failsafe_age: - type: Integer version_from: 140000 min_val: 0 max_val: 2100000000 vacuum_freeze_min_age: - type: Integer version_from: 90300 min_val: 0 max_val: 1000000000 vacuum_freeze_table_age: - type: Integer version_from: 90300 min_val: 0 max_val: 2000000000 vacuum_multixact_failsafe_age: - type: Integer version_from: 140000 min_val: 0 max_val: 2100000000 vacuum_multixact_freeze_min_age: - type: Integer version_from: 90300 min_val: 0 max_val: 1000000000 vacuum_multixact_freeze_table_age: - type: Integer version_from: 90300 min_val: 0 max_val: 2000000000 wal_buffers: - type: Integer version_from: 90300 min_val: -1 max_val: 262143 unit: 8kB wal_compression: - type: Bool version_from: 90500 version_till: 150000 - type: EnumBool version_from: 150000 possible_values: - pglz - lz4 - zstd wal_consistency_checking: - type: String version_from: 100000 wal_decode_buffer_size: - type: Integer version_from: 150000 min_val: 65536 max_val: 1073741823 unit: B wal_init_zero: - type: Bool version_from: 120000 wal_keep_segments: - type: Integer version_from: 90300 version_till: 130000 min_val: 0 max_val: 2147483647 wal_keep_size: - type: Integer version_from: 130000 min_val: 0 max_val: 2147483647 unit: MB wal_level: - type: Enum version_from: 90300 version_till: 90400 possible_values: - minimal - archive - hot_standby - type: Enum version_from: 90400 version_till: 90600 possible_values: - minimal - archive - hot_standby - logical - type: Enum version_from: 90600 possible_values: - minimal - replica - logical wal_log_hints: - type: Bool version_from: 90400 wal_receiver_create_temp_slot: - type: Bool version_from: 130000 wal_receiver_status_interval: - type: Integer version_from: 90300 min_val: 0 max_val: 2147483 unit: s wal_receiver_timeout: - type: Integer version_from: 90300 min_val: 0 max_val: 2147483647 unit: ms wal_recycle: - type: Bool version_from: 120000 
wal_retrieve_retry_interval: - type: Integer version_from: 90500 min_val: 1 max_val: 2147483647 unit: ms wal_sender_timeout: - type: Integer version_from: 90300 min_val: 0 max_val: 2147483647 unit: ms wal_skip_threshold: - type: Integer version_from: 130000 min_val: 0 max_val: 2147483647 unit: kB wal_sync_method: - type: Enum version_from: 90300 possible_values: - fsync - fdatasync - open_sync - open_datasync wal_writer_delay: - type: Integer version_from: 90300 min_val: 1 max_val: 10000 unit: ms wal_writer_flush_after: - type: Integer version_from: 90600 min_val: 0 max_val: 2147483647 unit: 8kB work_mem: - type: Integer version_from: 90300 min_val: 64 max_val: 2147483647 unit: kB xmlbinary: - type: Enum version_from: 90300 possible_values: - base64 - hex xmloption: - type: Enum version_from: 90300 possible_values: - content - document zero_damaged_pages: - type: Bool version_from: 90300 recovery_parameters: archive_cleanup_command: - type: String version_from: 90300 pause_at_recovery_target: - type: Bool version_from: 90300 version_till: 90500 primary_conninfo: - type: String version_from: 90300 primary_slot_name: - type: String version_from: 90400 promote_trigger_file: - type: String version_from: 120000 version_till: 160000 recovery_end_command: - type: String version_from: 90300 recovery_min_apply_delay: - type: Integer version_from: 90400 min_val: 0 max_val: 2147483647 unit: ms recovery_target: - type: Enum version_from: 90400 possible_values: - immediate - '' recovery_target_action: - type: Enum version_from: 90500 possible_values: - pause - promote - shutdown recovery_target_inclusive: - type: Bool version_from: 90300 recovery_target_lsn: - type: String version_from: 100000 recovery_target_name: - type: String version_from: 90400 recovery_target_time: - type: String version_from: 90300 recovery_target_timeline: - type: String version_from: 90300 recovery_target_xid: - type: String version_from: 90300 restore_command: - type: String version_from: 90300 
standby_mode: - type: Bool version_from: 90300 version_till: 120000 trigger_file: - type: String version_from: 90300 version_till: 120000 patroni-3.2.2/patroni/postgresql/bootstrap.py000066400000000000000000000563521455170150700214600ustar00rootroot00000000000000import logging import os import shlex import tempfile import time from typing import Any, Callable, Dict, List, Optional, Union, Tuple, TYPE_CHECKING from ..async_executor import CriticalTask from ..dcs import Leader, Member, RemoteMember from ..psycopg import quote_ident, quote_literal from ..utils import deep_compare, unquote if TYPE_CHECKING: # pragma: no cover from . import Postgresql logger = logging.getLogger(__name__) class Bootstrap(object): def __init__(self, postgresql: 'Postgresql') -> None: self._postgresql = postgresql self._running_custom_bootstrap = False @property def running_custom_bootstrap(self) -> bool: return self._running_custom_bootstrap @property def keep_existing_recovery_conf(self) -> bool: return self._running_custom_bootstrap and self._keep_existing_recovery_conf @staticmethod def process_user_options(tool: str, options: Union[Any, Dict[str, str], List[Union[str, Dict[str, Any]]]], not_allowed_options: Tuple[str, ...], error_handler: Callable[[str], None]) -> List[str]: """Format *options* in a list or dictionary format into command line long form arguments. .. note:: The format of the output of this method is to prepare arguments for use in the ``initdb`` method of `self._postgres`. 
:Example: The *options* can be defined as a dictionary of key, values to be converted into arguments: >>> Bootstrap.process_user_options('foo', {'foo': 'bar'}, (), print) ['--foo=bar'] Or as a list of single string arguments >>> Bootstrap.process_user_options('foo', ['yes'], (), print) ['--yes'] Or as a list of key, value options >>> Bootstrap.process_user_options('foo', [{'foo': 'bar'}], (), print) ['--foo=bar'] Or a combination of single and key, values >>> Bootstrap.process_user_options('foo', ['yes', {'foo': 'bar'}], (), print) ['--yes', '--foo=bar'] Options that contain spaces are passed as is to ``subprocess.call`` >>> Bootstrap.process_user_options('foo', [{'foo': 'bar baz'}], (), print) ['--foo=bar baz'] Options that are quoted will be unquoted, so the quotes aren't interpreted literally by the postgres command >>> Bootstrap.process_user_options('foo', [{'foo': '"bar baz"'}], (), print) ['--foo=bar baz'] .. note:: The *error_handler* is called when any of these conditions are met: * Key, value dictionaries in the list form contains multiple keys. * If a key is listed in *not_allowed_options*. * If the options list is not in the required structure. 
:param tool: The name of the tool used in error reports to *error_handler* :param options: Options to parse as a list of key, values or single values, or a dictionary :param not_allowed_options: List of keys that cannot be used in the list of key, value formatted options :param error_handler: A function which will be called when an error condition is encountered :returns: List of long form arguments to pass to the named tool """ user_options: List[str] = [] def option_is_allowed(name: str) -> bool: ret = name not in not_allowed_options if not ret: error_handler('{0} option for {1} is not allowed'.format(name, tool)) return ret if isinstance(options, dict): for key, val in options.items(): if key and val: user_options.append('--{0}={1}'.format(key, unquote(val))) elif isinstance(options, list): for opt in options: if isinstance(opt, str) and option_is_allowed(opt): user_options.append('--{0}'.format(opt)) elif isinstance(opt, dict): keys = list(opt.keys()) if len(keys) != 1 or not isinstance(opt[keys[0]], str) or not option_is_allowed(keys[0]): error_handler('Error when parsing {0} key-value option {1}: only one key-value is allowed' ' and value should be a string'.format(tool, opt[keys[0]])) user_options.append('--{0}={1}'.format(keys[0], unquote(opt[keys[0]]))) else: error_handler('Error when parsing {0} option {1}: value should be string value' ' or a single key-value pair'.format(tool, opt)) else: error_handler('{0} options must be list or dict'.format(tool)) return user_options def _initdb(self, config: Any) -> bool: self._postgresql.set_state('initializing new cluster') not_allowed_options = ('pgdata', 'nosync', 'pwfile', 'sync-only', 'version') def error_handler(e: str) -> None: raise Exception(e) options = self.process_user_options('initdb', config or [], not_allowed_options, error_handler) pwfile = None if self._postgresql.config.superuser: if 'username' in self._postgresql.config.superuser: 
options.append('--username={0}'.format(self._postgresql.config.superuser['username'])) if 'password' in self._postgresql.config.superuser: (fd, pwfile) = tempfile.mkstemp() os.write(fd, self._postgresql.config.superuser['password'].encode('utf-8')) os.close(fd) options.append('--pwfile={0}'.format(pwfile)) ret = self._postgresql.initdb(*options) if pwfile: os.remove(pwfile) if ret: self._postgresql.configure_server_parameters() else: self._postgresql.set_state('initdb failed') return ret def _post_restore(self) -> None: self._postgresql.config.restore_configuration_files() self._postgresql.configure_server_parameters() # make sure there is no trigger file or postgres will be automatically promoted trigger_file = self._postgresql.config.triggerfile_good_name trigger_file = (self._postgresql.config.get('recovery_conf') or {}).get(trigger_file) or 'promote' trigger_file = os.path.abspath(os.path.join(self._postgresql.data_dir, trigger_file)) if os.path.exists(trigger_file): os.unlink(trigger_file) def _custom_bootstrap(self, config: Any) -> bool: """Bootstrap a fresh Patroni cluster using a custom method provided by the user. :param config: configuration used for running a custom bootstrap method. It comes from the Patroni YAML file, so it is expected to be a :class:`dict`. .. note:: *config* must contain a ``command`` key, which value is the command or script to perform the custom bootstrap procedure. The exit code of the ``command`` dictates if the bootstrap succeeded or failed. When calling ``command``, Patroni will pass the following arguments to the ``command`` call: * ``--scope``: contains the value of ``scope`` configuration; * ``--data_dir``: contains the value of the ``postgresql.data_dir`` configuration. You can avoid that behavior by filling the optional key ``no_params`` with the value ``False`` in the configuration file, which will instruct Patroni to not pass these parameters to the ``command`` call. 
Besides that, a couple more keys are supported in *config*, but optional: * ``keep_existing_recovery_conf``: if ``True``, instruct Patroni to not remove the existing ``recovery.conf`` (PostgreSQL <= 11), to not discard recovery parameters from the configuration (PostgreSQL >= 12), and to not remove the files ``recovery.signal`` or ``standby.signal`` (PostgreSQL >= 12). This is specially useful when you are restoring backups through tools like pgBackRest and Barman, in which case they generated the appropriate recovery settings for you; * ``recovery_conf``: a section containing a map, where each key is the name of a recovery related setting, and the value is the value of the corresponding setting. Any key/value other than the ones that were described above will be interpreted as additional arguments for the ``command`` call. They will all be added to the call in the format ``--key=value``. :returns: ``True`` if the bootstrap was successful, i.e. the execution of the custom ``command`` from *config* exited with code ``0``, ``False`` otherwise. 
""" self._postgresql.set_state('running custom bootstrap script') params = [] if config.get('no_params') else ['--scope=' + self._postgresql.scope, '--datadir=' + self._postgresql.data_dir] # Add custom parameters specified by the user reserved_args = {'command', 'no_params', 'keep_existing_recovery_conf', 'recovery_conf', 'scope', 'datadir'} params += [f"--{arg}={val}" for arg, val in config.items() if arg not in reserved_args] try: logger.info('Running custom bootstrap script: %s', config['command']) if self._postgresql.cancellable.call(shlex.split(config['command']) + params) != 0: self._postgresql.set_state('custom bootstrap failed') return False except Exception: logger.exception('Exception during custom bootstrap') return False self._post_restore() if 'recovery_conf' in config: self._postgresql.config.write_recovery_conf(config['recovery_conf']) elif not self.keep_existing_recovery_conf: self._postgresql.config.remove_recovery_conf() return True def call_post_bootstrap(self, config: Dict[str, Any]) -> bool: """ runs a script after initdb or custom bootstrap script is called and waits until completion. """ cmd = config.get('post_bootstrap') or config.get('post_init') if cmd: r = self._postgresql.connection_pool.conn_kwargs connstring = self._postgresql.config.format_dsn(r, True) if 'host' not in r: # https://www.postgresql.org/docs/current/static/libpq-pgpass.html # A host name of localhost matches both TCP (host name localhost) and Unix domain socket # (pghost empty or the default socket directory) connections coming from the local machine. 
r['host'] = 'localhost' # set it to localhost to write into pgpass env = self._postgresql.config.write_pgpass(r) env['PGOPTIONS'] = '-c synchronous_commit=local -c statement_timeout=0' try: ret = self._postgresql.cancellable.call(shlex.split(cmd) + [connstring], env=env) except OSError: logger.error('post_init script %s failed', cmd) return False if ret != 0: logger.error('post_init script %s returned non-zero code %d', cmd, ret) return False return True def create_replica(self, clone_member: Union[Leader, Member, None]) -> Optional[int]: """ create the replica according to the replica_method defined by the user. this is a list, so we need to loop through all methods the user supplies """ self._postgresql.set_state('creating replica') self._postgresql.schedule_sanity_checks_after_pause() is_remote_member = isinstance(clone_member, RemoteMember) # get list of replica methods either from clone member or from # the config. If there is no configuration key, or no value is # specified, use basebackup replica_methods = (clone_member.create_replica_methods if is_remote_member else self._postgresql.create_replica_methods) or ['basebackup'] if clone_member and clone_member.conn_url: r = clone_member.conn_kwargs(self._postgresql.config.replication) # add the credentials to connect to the replica origin to pgpass. 
env = self._postgresql.config.write_pgpass(r) connstring = self._postgresql.config.format_dsn(r, True) else: connstring = '' env = os.environ.copy() # if we don't have any source, leave only replica methods that work without it replica_methods = [r for r in replica_methods if self._postgresql.replica_method_can_work_without_replication_connection(r)] # go through them in priority order ret = 1 for replica_method in replica_methods: if self._postgresql.cancellable.is_cancelled: break method_config = self._postgresql.replica_method_options(replica_method) # if the method is basebackup, then use the built-in if replica_method == "basebackup": ret = self.basebackup(connstring, env, method_config) if ret == 0: logger.info("replica has been created using basebackup") # if basebackup succeeds, exit with success break else: if not self._postgresql.data_directory_empty(): if method_config.get('keep_data', False): logger.info('Leaving data directory uncleaned') else: self._postgresql.remove_data_directory() cmd = replica_method # user-defined method; check for configuration # not required, actually if method_config: # look to see if the user has supplied a full command path # if not, use the method name as the command cmd = method_config.pop('command', cmd) # add the default parameters if not method_config.get('no_params', False): method_config.update({"scope": self._postgresql.scope, "role": "replica", "datadir": self._postgresql.data_dir, "connstring": connstring}) else: for param in ('no_params', 'no_master', 'no_leader', 'keep_data'): method_config.pop(param, None) params = ["--{0}={1}".format(arg, val) for arg, val in method_config.items()] try: # call script with the full set of parameters ret = self._postgresql.cancellable.call(shlex.split(cmd) + params, env=env) # if we succeeded, stop if ret == 0: logger.info('replica has been created using %s', replica_method) break else: logger.error('Error creating replica using method %s: %s exited with code=%s', replica_method, 
cmd, ret) except Exception: logger.exception('Error creating replica using method %s', replica_method) ret = 1 self._postgresql.set_state('stopped') return ret def basebackup(self, conn_url: str, env: Dict[str, str], options: Dict[str, Any]) -> Optional[int]: # creates a replica data dir using pg_basebackup. # this is the default, built-in create_replica_methods # tries twice, then returns failure (as 1) # uses "stream" as the xlog-method to avoid sync issues # supports additional user-supplied options, those are not validated maxfailures = 2 ret = 1 not_allowed_options = ('pgdata', 'format', 'wal-method', 'xlog-method', 'gzip', 'version', 'compress', 'dbname', 'host', 'port', 'username', 'password') user_options = self.process_user_options('basebackup', options, not_allowed_options, logger.error) for bbfailures in range(0, maxfailures): if self._postgresql.cancellable.is_cancelled: break if not self._postgresql.data_directory_empty(): self._postgresql.remove_data_directory() try: ret = self._postgresql.cancellable.call([self._postgresql.pgcommand('pg_basebackup'), '--pgdata=' + self._postgresql.data_dir, '-X', 'stream', '--dbname=' + conn_url] + user_options, env=env) if ret == 0: break else: logger.error('Error when fetching backup: pg_basebackup exited with code=%s', ret) except Exception as e: logger.error('Error when fetching backup with pg_basebackup: %s', e) if bbfailures < maxfailures - 1: logger.warning('Trying again in 5 seconds') time.sleep(5) return ret def clone(self, clone_member: Union[Leader, Member, None]) -> bool: """ - initialize the replica from an existing member (primary or replica) - initialize the replica using the replica creation method that works without the replication connection (i.e. restore from on-disk base backup) """ ret = self.create_replica(clone_member) == 0 if ret: self._post_restore() return ret def bootstrap(self, config: Dict[str, Any]) -> bool: """ Initialize a new node from scratch and start it. 
""" pg_hba = config.get('pg_hba', []) method = config.get('method') or 'initdb' if method != 'initdb' and method in config and 'command' in config[method]: self._keep_existing_recovery_conf = config[method].get('keep_existing_recovery_conf') self._running_custom_bootstrap = True do_initialize = self._custom_bootstrap else: method = 'initdb' do_initialize = self._initdb return do_initialize(config.get(method)) and self._postgresql.config.append_pg_hba(pg_hba) \ and self._postgresql.config.save_configuration_files() and bool(self._postgresql.start()) def create_or_update_role(self, name: str, password: Optional[str], options: List[str]) -> None: options = list(map(str.upper, options)) if 'NOLOGIN' not in options and 'LOGIN' not in options: options.append('LOGIN') if password: options.extend(['PASSWORD', quote_literal(password)]) sql = """DO $$ BEGIN SET local synchronous_commit = 'local'; PERFORM * FROM pg_catalog.pg_authid WHERE rolname = {0}; IF FOUND THEN ALTER ROLE {1} WITH {2}; ELSE CREATE ROLE {1} WITH {2}; END IF; END;$$""".format(quote_literal(name), quote_ident(name, self._postgresql.connection()), ' '.join(options)) self._postgresql.query('SET log_statement TO none') self._postgresql.query('SET log_min_duration_statement TO -1') self._postgresql.query("SET log_min_error_statement TO 'log'") self._postgresql.query("SET pg_stat_statements.track_utility to 'off'") try: self._postgresql.query(sql) finally: self._postgresql.query('RESET log_min_error_statement') self._postgresql.query('RESET log_min_duration_statement') self._postgresql.query('RESET log_statement') self._postgresql.query('RESET pg_stat_statements.track_utility') def post_bootstrap(self, config: Dict[str, Any], task: CriticalTask) -> Optional[bool]: try: postgresql = self._postgresql superuser = postgresql.config.superuser if 'username' in superuser and 'password' in superuser: self.create_or_update_role(superuser['username'], superuser['password'], ['SUPERUSER']) 
task.complete(self.call_post_bootstrap(config)) if task.result: replication = postgresql.config.replication self.create_or_update_role(replication['username'], replication.get('password'), ['REPLICATION']) rewind = postgresql.config.rewind_credentials if not deep_compare(rewind, superuser): self.create_or_update_role(rewind['username'], rewind.get('password'), []) for f in ('pg_ls_dir(text, boolean, boolean)', 'pg_stat_file(text, boolean)', 'pg_read_binary_file(text)', 'pg_read_binary_file(text, bigint, bigint, boolean)'): sql = """DO $$ BEGIN SET local synchronous_commit = 'local'; GRANT EXECUTE ON function pg_catalog.{0} TO {1}; END;$$""".format(f, quote_ident(rewind['username'], postgresql.connection())) postgresql.query(sql) if config.get('users'): logger.warning('User creation via "bootstrap.users" will be removed in v4.0.0') for name, value in (config.get('users') or {}).items(): if all(name != a.get('username') for a in (superuser, replication, rewind)): self.create_or_update_role(name, value.get('password'), value.get('options', [])) # We were doing a custom bootstrap instead of running initdb, therefore we opened trust # access from certain addresses to be able to reach cluster and change password if self._running_custom_bootstrap: self._running_custom_bootstrap = False # If we don't have custom configuration for pg_hba.conf we need to restore original file if not postgresql.config.get('pg_hba'): if os.path.exists(postgresql.config.pg_hba_conf): os.unlink(postgresql.config.pg_hba_conf) postgresql.config.restore_configuration_files() postgresql.config.write_postgresql_conf() postgresql.config.replace_pg_ident() # at this point there should be no recovery.conf postgresql.config.remove_recovery_conf() if postgresql.config.hba_file: postgresql.restart() else: postgresql.config.replace_pg_hba() if postgresql.pending_restart: postgresql.restart() else: postgresql.reload() time.sleep(1) # give a time to postgres to "reload" configuration files 
postgresql.connection().close() # close connection to reconnect with a new password else: # initdb # We may want create database and extension for citus self._postgresql.citus_handler.bootstrap() except Exception: logger.exception('post_bootstrap') task.complete(False) return task.result patroni-3.2.2/patroni/postgresql/callback_executor.py000066400000000000000000000046271455170150700231130ustar00rootroot00000000000000import logging import sys from enum import Enum from threading import Condition, Thread from typing import Any, Dict, List from .cancellable import CancellableExecutor, CancellableSubprocess logger = logging.getLogger(__name__) class CallbackAction(str, Enum): NOOP = "noop" ON_START = "on_start" ON_STOP = "on_stop" ON_RESTART = "on_restart" ON_RELOAD = "on_reload" ON_ROLE_CHANGE = "on_role_change" def __repr__(self) -> str: return self.value class OnReloadExecutor(CancellableSubprocess): def call_nowait(self, cmd: List[str]) -> None: """Run one `on_reload` callback at most. To achieve it we always kill already running command including child processes.""" self.cancel(kill=True) self._kill_children() with self._lock: started = self._start_process(cmd, close_fds=True) if started and self._process is not None: Thread(target=self._process.wait).start() class CallbackExecutor(CancellableExecutor, Thread): def __init__(self): CancellableExecutor.__init__(self) Thread.__init__(self) self.daemon = True self._on_reload_executor = OnReloadExecutor() self._cmd = None self._condition = Condition() self.start() def call(self, cmd: List[str]) -> None: """Executes one callback at a time. Already running command is killed (including child processes). If it couldn't be killed we wait until it finishes. 
:param cmd: command to be executed""" kwargs: Dict[str, Any] = {'stacklevel': 3} if sys.version_info >= (3, 8) else {} logger.debug('CallbackExecutor.call(%s)', cmd, **kwargs) if cmd[-3] == CallbackAction.ON_RELOAD: return self._on_reload_executor.call_nowait(cmd) self._kill_process() with self._condition: self._cmd = cmd self._condition.notify() def run(self) -> None: while True: with self._condition: if self._cmd is None: self._condition.wait() cmd, self._cmd = self._cmd, None if cmd is not None: with self._lock: if not self._start_process(cmd, close_fds=True): continue if self._process: self._process.wait() self._kill_children() patroni-3.2.2/patroni/postgresql/cancellable.py000066400000000000000000000110521455170150700216540ustar00rootroot00000000000000import logging import psutil import subprocess from patroni.exceptions import PostgresException from patroni.utils import polling_loop from threading import Lock from typing import Any, Dict, List, Optional, Union logger = logging.getLogger(__name__) class CancellableExecutor(object): """ There must be only one such process so that AsyncExecutor can easily cancel it. 
""" def __init__(self) -> None: self._process = None self._process_cmd = None self._process_children: List[psutil.Process] = [] self._lock = Lock() def _start_process(self, cmd: List[str], *args: Any, **kwargs: Any) -> Optional[bool]: """This method must be executed only when the `_lock` is acquired""" try: self._process_children = [] self._process_cmd = cmd self._process = psutil.Popen(cmd, *args, **kwargs) except Exception: return logger.exception('Failed to execute %s', cmd) return True def _kill_process(self) -> None: with self._lock: if self._process is not None and self._process.is_running() and not self._process_children: try: self._process.suspend() # Suspend the process before getting list of children except psutil.Error as e: logger.info('Failed to suspend the process: %s', e.msg) try: self._process_children = self._process.children(recursive=True) except psutil.Error: pass try: self._process.kill() logger.warning('Killed %s because it was still running', self._process_cmd) except psutil.NoSuchProcess: pass except psutil.AccessDenied as e: logger.warning('Failed to kill the process: %s', e.msg) def _kill_children(self) -> None: waitlist: List[psutil.Process] = [] with self._lock: for child in self._process_children: try: child.kill() except psutil.NoSuchProcess: continue except psutil.AccessDenied as e: logger.info('Failed to kill child process: %s', e.msg) waitlist.append(child) psutil.wait_procs(waitlist) class CancellableSubprocess(CancellableExecutor): def __init__(self) -> None: super(CancellableSubprocess, self).__init__() self._is_cancelled = False def call(self, *args: Any, **kwargs: Union[Any, Dict[str, str]]) -> Optional[int]: for s in ('stdin', 'stdout', 'stderr'): kwargs.pop(s, None) communicate: Optional[Dict[str, str]] = kwargs.pop('communicate', None) input_data = None if isinstance(communicate, dict): input_data = communicate.get('input') if input_data: if input_data[-1] != '\n': input_data += '\n' input_data = input_data.encode('utf-8') 
kwargs['stdin'] = subprocess.PIPE kwargs['stdout'] = subprocess.PIPE kwargs['stderr'] = subprocess.PIPE try: with self._lock: if self._is_cancelled: raise PostgresException('cancelled') self._is_cancelled = False started = self._start_process(*args, **kwargs) if started and self._process is not None: if isinstance(communicate, dict): communicate['stdout'], communicate['stderr'] = self._process.communicate(input_data) return self._process.wait() finally: with self._lock: self._process = None self._kill_children() def reset_is_cancelled(self) -> None: with self._lock: self._is_cancelled = False @property def is_cancelled(self) -> bool: with self._lock: return self._is_cancelled def cancel(self, kill: bool = False) -> None: with self._lock: self._is_cancelled = True if self._process is None or not self._process.is_running(): return logger.info('Terminating %s', self._process_cmd) self._process.terminate() for _ in polling_loop(10): with self._lock: if self._process is None or not self._process.is_running(): return if kill: break self._kill_process() patroni-3.2.2/patroni/postgresql/citus.py000066400000000000000000000454671455170150700205770ustar00rootroot00000000000000import logging import re import time from threading import Condition, Event, Thread from urllib.parse import urlparse from typing import Any, Dict, List, Optional, Union, Tuple, TYPE_CHECKING from ..dcs import CITUS_COORDINATOR_GROUP_ID, Cluster from ..psycopg import connect, quote_ident, ProgrammingError if TYPE_CHECKING: # pragma: no cover from . 
import Postgresql CITUS_SLOT_NAME_RE = re.compile(r'^citus_shard_(move|split)_slot(_[1-9][0-9]*){2,3}$') logger = logging.getLogger(__name__) class PgDistNode(object): """Represents a single row in the `pg_dist_node` table""" def __init__(self, group: int, host: str, port: int, event: str, nodeid: Optional[int] = None, timeout: Optional[float] = None, cooldown: Optional[float] = None) -> None: self.group = group # A weird way of pausing client connections by adding the `-demoted` suffix to the hostname self.host = host + ('-demoted' if event == 'before_demote' else '') self.port = port # Event that is trying to change or changed the given row. # Possible values: before_demote, before_promote, after_promote. self.event = event self.nodeid = nodeid # If transaction was started, we need to COMMIT/ROLLBACK before the deadline self.timeout = timeout self.cooldown = cooldown or 10000 # 10s by default self.deadline: float = 0 # All changes in the pg_dist_node are serialized on the Patroni # side by performing them from a thread. The thread, that is # requested a change, sometimes needs to wait for a result. # For example, we want to pause client connections before demoting # the worker, and once it is done notify the calling thread. 
self._event = Event() def wait(self) -> None: self._event.wait() def wakeup(self) -> None: self._event.set() def __eq__(self, other: Any) -> bool: return isinstance(other, PgDistNode) and self.event == other.event\ and self.host == other.host and self.port == other.port def __ne__(self, other: Any) -> bool: return not self == other def __str__(self) -> str: return ('PgDistNode(nodeid={0},group={1},host={2},port={3},event={4})' .format(self.nodeid, self.group, self.host, self.port, self.event)) def __repr__(self) -> str: return str(self) class CitusHandler(Thread): def __init__(self, postgresql: 'Postgresql', config: Optional[Dict[str, Union[str, int]]]) -> None: super(CitusHandler, self).__init__() self.daemon = True self._postgresql = postgresql self._config = config if config: self._connection = postgresql.connection_pool.get( 'citus', {'dbname': config['database'], 'options': '-c statement_timeout=0 -c idle_in_transaction_session_timeout=0'}) self._pg_dist_node: Dict[int, PgDistNode] = {} # Cache of pg_dist_node: {groupid: PgDistNode()} self._tasks: List[PgDistNode] = [] # Requests to change pg_dist_node, every task is a `PgDistNode` self._in_flight: Optional[PgDistNode] = None # Reference to the `PgDistNode` being changed in a transaction self._schedule_load_pg_dist_node = True # Flag that "pg_dist_node" should be queried from the database self._condition = Condition() # protects _pg_dist_node, _tasks, _in_flight, and _schedule_load_pg_dist_node self.schedule_cache_rebuild() def is_enabled(self) -> bool: return isinstance(self._config, dict) def group(self) -> Optional[int]: return int(self._config['group']) if isinstance(self._config, dict) else None def is_coordinator(self) -> bool: return self.is_enabled() and self.group() == CITUS_COORDINATOR_GROUP_ID def is_worker(self) -> bool: return self.is_enabled() and not self.is_coordinator() def schedule_cache_rebuild(self) -> None: with self._condition: self._schedule_load_pg_dist_node = True def on_demote(self) 
-> None: with self._condition: self._pg_dist_node.clear() empty_tasks: List[PgDistNode] = [] self._tasks[:] = empty_tasks self._in_flight = None def query(self, sql: str, *params: Any) -> List[Tuple[Any, ...]]: try: logger.debug('query(%s, %s)', sql, params) return self._connection.query(sql, *params) except Exception as e: logger.error('Exception when executing query "%s", (%s): %r', sql, params, e) self._connection.close() with self._condition: self._in_flight = None self.schedule_cache_rebuild() raise e def load_pg_dist_node(self) -> bool: """Read from the `pg_dist_node` table and put it into the local cache""" with self._condition: if not self._schedule_load_pg_dist_node: return True self._schedule_load_pg_dist_node = False try: rows = self.query("SELECT nodeid, groupid, nodename, nodeport, noderole" " FROM pg_catalog.pg_dist_node WHERE noderole = 'primary'") except Exception: return False with self._condition: self._pg_dist_node = {r[1]: PgDistNode(r[1], r[2], r[3], 'after_promote', r[0]) for r in rows} return True def sync_pg_dist_node(self, cluster: Cluster) -> None: """Maintain the `pg_dist_node` from the coordinator leader every heartbeat loop. 
We can't always rely on REST API calls from worker nodes in order to maintain `pg_dist_node`, therefore at least once per heartbeat loop we make sure that workes registered in `self._pg_dist_node` cache are matching the cluster view from DCS by creating tasks the same way as it is done from the REST API.""" if not self.is_coordinator(): return with self._condition: if not self.is_alive(): self.start() self.add_task('after_promote', CITUS_COORDINATOR_GROUP_ID, self._postgresql.connection_string) for group, worker in cluster.workers.items(): leader = worker.leader if leader and leader.conn_url\ and leader.data.get('role') in ('master', 'primary') and leader.data.get('state') == 'running': self.add_task('after_promote', group, leader.conn_url) def find_task_by_group(self, group: int) -> Optional[int]: for i, task in enumerate(self._tasks): if task.group == group: return i def pick_task(self) -> Tuple[Optional[int], Optional[PgDistNode]]: """Returns the tuple(i, task), where `i` - is the task index in the self._tasks list Tasks are picked by following priorities: 1. If there is already a transaction in progress, pick a task that that will change already affected worker primary. 2. If the coordinator address should be changed - pick a task with group=0 (coordinators are always in group 0). 3. Pick a task that is the oldest (first from the self._tasks) """ with self._condition: if self._in_flight: i = self.find_task_by_group(self._in_flight.group) else: while True: i = self.find_task_by_group(CITUS_COORDINATOR_GROUP_ID) # set_coordinator if i is None and self._tasks: i = 0 if i is None: break task = self._tasks[i] if task == self._pg_dist_node.get(task.group): self._tasks.pop(i) # nothing to do because cached version of pg_dist_node already matches else: break task = self._tasks[i] if i is not None else None # When tasks are added it could happen that self._pg_dist_node # wasn't ready (self._schedule_load_pg_dist_node is False) # and hence the nodeid wasn't filled. 
if task and task.group in self._pg_dist_node: task.nodeid = self._pg_dist_node[task.group].nodeid return i, task def update_node(self, task: PgDistNode) -> None: if task.nodeid is not None: self.query('SELECT pg_catalog.citus_update_node(%s, %s, %s, true, %s)', task.nodeid, task.host, task.port, task.cooldown) elif task.event != 'before_demote': task.nodeid = self.query("SELECT pg_catalog.citus_add_node(%s, %s, %s, 'primary', 'default')", task.host, task.port, task.group)[0][0] def process_task(self, task: PgDistNode) -> bool: """Updates a single row in `pg_dist_node` table, optionally in a transaction. The transaction is started if we do a demote of the worker node or before promoting the other worker if there is no transaction in progress. And, the transaction is committed when the switchover/failover completed. .. note: The maximum lifetime of the transaction in progress is controlled outside of this method. .. note: Read access to `self._in_flight` isn't protected because we know it can't be changed outside of our thread. :param task: reference to a :class:`PgDistNode` object that represents a row to be updated/created. :returns: `True` if the row was succesfully created/updated or transaction in progress was committed as an indicator that the `self._pg_dist_node` cache should be updated, or, if the new transaction was opened, this method returns `False`. """ if task.event == 'after_promote': # The after_promote may happen without previous before_demote and/or # before_promore. In this case we just call self.update_node() method. # If there is a transaction in progress, it could be that it already did # required changes and we can simply COMMIT. 
if not self._in_flight or self._in_flight.host != task.host or self._in_flight.port != task.port: self.update_node(task) if self._in_flight: self.query('COMMIT') return True else: # before_demote, before_promote if task.timeout: task.deadline = time.time() + task.timeout if not self._in_flight: self.query('BEGIN') self.update_node(task) return False def process_tasks(self) -> None: while True: # Read access to `_in_flight` isn't protected because we know it can't be changed outside of our thread. if not self._in_flight and not self.load_pg_dist_node(): break i, task = self.pick_task() if not task or i is None: break try: update_cache = self.process_task(task) except Exception as e: logger.error('Exception when working with pg_dist_node: %r', e) update_cache = None with self._condition: if self._tasks: if update_cache: self._pg_dist_node[task.group] = task if update_cache is False: # an indicator that process_tasks has started a transaction self._in_flight = task else: self._in_flight = None if id(self._tasks[i]) == id(task): self._tasks.pop(i) task.wakeup() def run(self) -> None: while True: try: with self._condition: if self._schedule_load_pg_dist_node: timeout = -1 elif self._in_flight: timeout = self._in_flight.deadline - time.time() if self._tasks else None else: timeout = -1 if self._tasks else None if timeout is None or timeout > 0: self._condition.wait(timeout) elif self._in_flight: logger.warning('Rolling back transaction. Last known status: %s', self._in_flight) self.query('ROLLBACK') self._in_flight = None self.process_tasks() except Exception: logger.exception('run') def _add_task(self, task: PgDistNode) -> bool: with self._condition: i = self.find_task_by_group(task.group) # The `PgDistNode.timeout` == None is an indicator that it was scheduled from the sync_pg_dist_node(). if task.timeout is None: # We don't want to override the already existing task created from REST API. 
if i is not None and self._tasks[i].timeout is not None: return False # There is a little race condition with tasks created from REST API - the call made "before" the member # key is updated in DCS. Therefore it is possible that :func:`sync_pg_dist_node` will try to create a # task based on the outdated values of "state"/"role". To solve it we introduce an artificial timeout. # Only when the timeout is reached new tasks could be scheduled from sync_pg_dist_node() if self._in_flight and self._in_flight.group == task.group and self._in_flight.timeout is not None\ and self._in_flight.deadline > time.time(): return False # Override already existing task for the same worker group if i is not None: if task != self._tasks[i]: logger.debug('Overriding existing task: %s != %s', self._tasks[i], task) self._tasks[i] = task self._condition.notify() return True # Add the task to the list if Worker node state is different from the cached `pg_dist_node` elif self._schedule_load_pg_dist_node or task != self._pg_dist_node.get(task.group)\ or self._in_flight and task.group == self._in_flight.group: logger.debug('Adding the new task: %s', task) self._tasks.append(task) self._condition.notify() return True return False def add_task(self, event: str, group: int, conn_url: str, timeout: Optional[float] = None, cooldown: Optional[float] = None) -> Optional[PgDistNode]: try: r = urlparse(conn_url) except Exception as e: return logger.error('Failed to parse connection url %s: %r', conn_url, e) host = r.hostname if host: port = r.port or 5432 task = PgDistNode(group, host, port, event, timeout=timeout, cooldown=cooldown) return task if self._add_task(task) else None def handle_event(self, cluster: Cluster, event: Dict[str, Any]) -> None: if not self.is_alive(): return worker = cluster.workers.get(event['group']) if not (worker and worker.leader and worker.leader.name == event['leader'] and worker.leader.conn_url): return task = self.add_task(event['type'], event['group'], 
worker.leader.conn_url, event['timeout'], event['cooldown'] * 1000) if task and event['type'] == 'before_demote': task.wait() def bootstrap(self) -> None: if not isinstance(self._config, dict): # self.is_enabled() return conn_kwargs = {**self._postgresql.connection_pool.conn_kwargs, 'options': '-c synchronous_commit=local -c statement_timeout=0'} if self._config['database'] != self._postgresql.database: conn = connect(**conn_kwargs) try: with conn.cursor() as cur: cur.execute('CREATE DATABASE {0}'.format( quote_ident(self._config['database'], conn)).encode('utf-8')) except ProgrammingError as exc: if exc.diag.sqlstate == '42P04': # DuplicateDatabase logger.debug('Exception when creating database: %r', exc) else: raise exc finally: conn.close() conn_kwargs['dbname'] = self._config['database'] conn = connect(**conn_kwargs) try: with conn.cursor() as cur: cur.execute('CREATE EXTENSION IF NOT EXISTS citus') superuser = self._postgresql.config.superuser params = {k: superuser[k] for k in ('password', 'sslcert', 'sslkey') if k in superuser} if params: cur.execute("INSERT INTO pg_catalog.pg_dist_authinfo VALUES" "(0, pg_catalog.current_user(), %s)", (self._postgresql.config.format_dsn(params),)) if self.is_coordinator(): r = urlparse(self._postgresql.connection_string) cur.execute("SELECT pg_catalog.citus_set_coordinator_host(%s, %s, 'primary', 'default')", (r.hostname, r.port or 5432)) finally: conn.close() def adjust_postgres_gucs(self, parameters: Dict[str, Any]) -> None: if not self.is_enabled(): return # citus extension must be on the first place in shared_preload_libraries shared_preload_libraries = list(filter( lambda el: el and el != 'citus', [p.strip() for p in parameters.get('shared_preload_libraries', '').split(',')])) parameters['shared_preload_libraries'] = ','.join(['citus'] + shared_preload_libraries) # if not explicitly set Citus overrides max_prepared_transactions to max_connections*2 if parameters['max_prepared_transactions'] == 0: 
parameters['max_prepared_transactions'] = parameters['max_connections'] * 2 # Resharding in Citus implemented using logical replication parameters['wal_level'] = 'logical' # Sometimes Citus needs to connect to the local postgres. We will do it the same way as Patroni does. parameters['citus.local_hostname'] = self._postgresql.connection_pool.conn_kwargs.get('host', 'localhost') def ignore_replication_slot(self, slot: Dict[str, str]) -> bool: if isinstance(self._config, dict) and self._postgresql.is_primary() and\ slot['type'] == 'logical' and slot['database'] == self._config['database']: m = CITUS_SLOT_NAME_RE.match(slot['name']) return bool(m and {'move': 'pgoutput', 'split': 'citus'}.get(m.group(1)) == slot['plugin']) return False patroni-3.2.2/patroni/postgresql/config.py000066400000000000000000001764011455170150700207060ustar00rootroot00000000000000import logging import os import re import shutil import socket import stat import time from contextlib import contextmanager from urllib.parse import urlparse, parse_qsl, unquote from types import TracebackType from typing import Any, Collection, Dict, Iterator, List, Optional, Union, Tuple, Type, TYPE_CHECKING from .validator import recovery_parameters, transform_postgresql_parameter_value, transform_recovery_parameter_value from ..collections import CaseInsensitiveDict, CaseInsensitiveSet from ..dcs import Leader, Member, RemoteMember, slot_name_from_member_name from ..exceptions import PatroniFatalException, PostgresConnectionException from ..file_perm import pg_perm from ..utils import compare_values, parse_bool, parse_int, split_host_port, uri, validate_directory, is_subpath from ..validator import IntValidator, EnumValidator if TYPE_CHECKING: # pragma: no cover from . 
import Postgresql

logger = logging.getLogger(__name__)

# matches "name =" at the start of a conninfo/recovery.conf assignment
PARAMETER_RE = re.compile(r'([a-z_]+)\s*=\s*')


def conninfo_uri_parse(dsn: str) -> Dict[str, str]:
    """Parse a ``postgres://``/``postgresql://`` URI into a dict of libpq keywords.

    Supports multi-host URIs and bracketed IPv6 addresses; percent-encoded
    values are unquoted and query-string parameters are merged in.
    """
    ret: Dict[str, str] = {}
    r = urlparse(dsn)
    if r.username:
        ret['user'] = r.username
    if r.password:
        ret['password'] = r.password
    if r.path[1:]:
        ret['dbname'] = r.path[1:]
    hosts: List[str] = []
    ports: List[str] = []
    for netloc in r.netloc.split('@')[-1].split(','):
        host = None
        if '[' in netloc and ']' in netloc:
            # bracketed IPv6 literal, possibly followed by ":port"
            tmp = netloc.split(']') + ['']
            host = tmp[0][1:]
            netloc = ':'.join(tmp[:2])
        tmp = netloc.rsplit(':', 1)
        if host is None:
            host = tmp[0]
        hosts.append(host)
        ports.append(tmp[1] if len(tmp) == 2 else '')
    if hosts:
        ret['host'] = ','.join(hosts)
    if ports:
        ret['port'] = ','.join(ports)
    ret = {name: unquote(value) for name, value in ret.items()}
    ret.update({name: value for name, value in parse_qsl(r.query)})
    if ret.get('ssl') == 'true':
        # legacy JDBC-style flag: translate to the libpq equivalent
        del ret['ssl']
        ret['sslmode'] = 'require'
    return ret


def read_param_value(value: str) -> Union[Tuple[None, None], Tuple[str, int]]:
    """Read one (possibly quoted) conninfo value from the start of *value*.

    :returns: tuple of (parsed value, number of characters consumed), or
              ``(None, None)`` if a quoted value is not properly terminated.
    """
    length = len(value)
    ret = ''
    is_quoted = value[0] == "'"
    i = int(is_quoted)
    while i < length:
        if is_quoted:
            if value[i] == "'":
                return ret, i + 1
        elif value[i].isspace():
            break
        if value[i] == '\\':  # backslash escapes the next character
            i += 1
            if i >= length:
                break
        ret += value[i]
        i += 1
    return (None, None) if is_quoted else (ret, i)


def conninfo_parse(dsn: str) -> Optional[Dict[str, str]]:
    """Parse a key/value style libpq connection string; ``None`` on syntax error."""
    ret: Dict[str, str] = {}
    length = len(dsn)
    i = 0
    while i < length:
        if dsn[i].isspace():
            i += 1
            continue

        param_match = PARAMETER_RE.match(dsn[i:])
        if not param_match:
            return
        param = param_match.group(1)
        i += param_match.end()

        if i >= length:  # "name =" with no value
            return

        value, end = read_param_value(dsn[i:])
        if value is None or end is None:
            return
        i += end
        ret[param] = value
    return ret


def parse_dsn(value: str) -> Optional[Dict[str, str]]:
    """
    Very simple equivalent of `psycopg2.extensions.parse_dsn` introduced in 2.7.0.
    We are not using psycopg2 function in order to remain compatible with 2.5.4+.
    There is one minor difference though, this function removes `dbname` from the result
    and sets the `sslmode`, 'gssencmode', and `channel_binding` to `prefer` if it is not present in
    the connection string. This is necessary to simplify comparison of the old and the new values.

    >>> r = parse_dsn('postgresql://u%2Fse:pass@:%2f123,[::1]/db%2Fsdf?application_name=mya%2Fpp&ssl=true')
    >>> r == {'application_name': 'mya/pp', 'host': ',::1', 'sslmode': 'require',\
              'password': 'pass', 'port': '/123,', 'user': 'u/se', 'gssencmode': 'prefer', 'channel_binding': 'prefer'}
    True

    >>> r = parse_dsn(" host = 'host' dbname = db\\\\ name requiressl=1 ")
    >>> r == {'host': 'host', 'sslmode': 'require', 'gssencmode': 'prefer', 'channel_binding': 'prefer'}
    True

    >>> parse_dsn('requiressl = 0\\\\') == {'sslmode': 'prefer', 'gssencmode': 'prefer', 'channel_binding': 'prefer'}
    True

    >>> parse_dsn("host=a foo = '") is None
    True

    >>> parse_dsn("host=a foo = ") is None
    True

    >>> parse_dsn("1") is None
    True
    """
    if value.startswith('postgres://') or value.startswith('postgresql://'):
        ret = conninfo_uri_parse(value)
    else:
        ret = conninfo_parse(value)

    if ret:
        if 'sslmode' not in ret:  # allow sslmode to take precedence over requiressl
            requiressl = ret.pop('requiressl', None)
            if requiressl == '1':
                ret['sslmode'] = 'require'
            elif requiressl is not None:
                ret['sslmode'] = 'prefer'
            ret.setdefault('sslmode', 'prefer')
        if 'dbname' in ret:
            del ret['dbname']
        ret.setdefault('gssencmode', 'prefer')
        ret.setdefault('channel_binding', 'prefer')
    return ret


def strip_comment(value: str) -> str:
    """Cut off an unquoted trailing ``#`` comment and surrounding whitespace."""
    i = value.find('#')
    if i > -1:
        value = value[:i].strip()
    return value


def read_recovery_param_value(value: str) -> Optional[str]:
    """
    >>> read_recovery_param_value('') is None
    True
    >>> read_recovery_param_value("'") is None
    True
    >>> read_recovery_param_value("''a") is None
    True
    >>> read_recovery_param_value('a b') is None
    True
    >>> read_recovery_param_value("'''") is None
    True
    >>> read_recovery_param_value("'\\\\") is None
    True
    >>>
read_recovery_param_value("'a' s#") is None
    True
    >>> read_recovery_param_value("'\\\\'''' #a")
    "''"
    >>> read_recovery_param_value('asd')
    'asd'
    """
    value = value.strip()
    length = len(value)
    if length == 0:
        return None
    elif value[0] == "'":
        if length == 1:
            return None
        ret = ''
        i = 1
        while i < length:
            if value[i] == '\\':  # backslash escapes the next character
                i += 1
                if i >= length:
                    return None
            elif value[i] == "'":
                i += 1
                if i >= length:  # closing quote at end of string
                    break
                if value[i] in ('#', ' '):
                    # only whitespace/comment may follow the closing quote
                    if strip_comment(value[i:]):
                        return None
                    break
                if value[i] != "'":  # '' inside quotes is an escaped quote
                    return None
            ret += value[i]
            i += 1
        else:
            return None  # loop exhausted without finding a closing quote
        return ret
    else:
        value = strip_comment(value)
        if not value or ' ' in value or '\\' in value:
            return None
        return value


def mtime(filename: str) -> Optional[float]:
    """Return the file's mtime or ``None`` if it cannot be stat'ed."""
    try:
        return os.stat(filename).st_mtime
    except OSError:
        return None


class ConfigWriter(object):
    """Small helper to (re)write a Patroni-managed config file line by line."""

    def __init__(self, filename: str) -> None:
        self._filename = filename
        self._fd = None

    def __enter__(self) -> 'ConfigWriter':
        self._fd = open(self._filename, 'w')
        self.writeline('# Do not edit this file manually!\n# It will be overwritten by Patroni!')
        return self

    def __exit__(self, exc_type: Optional[Type[BaseException]],
                 exc_val: Optional[BaseException],
                 exc_tb: Optional[TracebackType]) -> None:
        if self._fd:
            self._fd.close()

    def writeline(self, line: str) -> None:
        if self._fd:
            self._fd.write(line)
            self._fd.write('\n')

    def writelines(self, lines: List[Optional[str]]) -> None:
        # non-string entries (e.g. None placeholders) are silently skipped
        for line in lines:
            if isinstance(line, str):
                self.writeline(line)

    @staticmethod
    def escape(value: Any) -> str:  # Escape (by doubling) any single quotes or backslashes in given string
        return re.sub(r'([\'\\])', r'\1\1', str(value))

    def write_param(self, param: str, value: Any) -> None:
        self.writeline("{0} = '{1}'".format(param, self.escape(value)))


def _false_validator(value: Any) -> bool:
    # marker validator: the parameter may never be changed via DCS
    return False


def _bool_validator(value: Any) -> bool:
    return parse_bool(value) is not None


def _bool_is_true_validator(value: Any) -> bool:
    return parse_bool(value) is True


class ConfigHandler(object):

    # List of
    # parameters which must be always passed to postmaster as command line options
    # to make it not possible to change them with 'ALTER SYSTEM'.
    # Some of these parameters have sane default value assigned and Patroni doesn't allow
    # to decrease this value. E.g. 'wal_level' can't be lower then 'hot_standby' and so on.
    # These parameters could be changed only globally, i.e. via DCS.
    # P.S. 'listen_addresses' and 'port' are added here just for convenience, to mark them
    # as a parameters which should always be passed through command line.
    #
    # Format:
    #  key - parameter name
    #  value - tuple(default_value, check_function, min_version)
    #    default_value -- some sane default value
    #    check_function -- if the new value is not correct must return `!False`
    #    min_version -- major version of PostgreSQL when parameter was introduced
    CMDLINE_OPTIONS = CaseInsensitiveDict({
        'listen_addresses': (None, _false_validator, 90100),
        'port': (None, _false_validator, 90100),
        'cluster_name': (None, _false_validator, 90500),
        'wal_level': ('hot_standby', EnumValidator(('hot_standby', 'replica', 'logical')), 90100),
        'hot_standby': ('on', _bool_is_true_validator, 90100),
        'max_connections': (100, IntValidator(min=25), 90100),
        'max_wal_senders': (10, IntValidator(min=3), 90100),
        'wal_keep_segments': (8, IntValidator(min=1), 90100),
        'wal_keep_size': ('128MB', IntValidator(min=16, base_unit='MB'), 130000),
        'max_prepared_transactions': (0, IntValidator(min=0), 90100),
        'max_locks_per_transaction': (64, IntValidator(min=32), 90100),
        'track_commit_timestamp': ('off', _bool_validator, 90500),
        'max_replication_slots': (10, IntValidator(min=4), 90400),
        'max_worker_processes': (8, IntValidator(min=2), 90400),
        'wal_log_hints': ('on', _bool_is_true_validator, 90400)
    })

    # names of all known recovery parameters (case-insensitive)
    _RECOVERY_PARAMETERS = CaseInsensitiveSet(recovery_parameters.keys())

    def __init__(self, postgresql: 'Postgresql', config: Dict[str, Any]) -> None:
        self._postgresql = postgresql
        self._config_dir = os.path.abspath(config.get('config_dir', '') or
postgresql.data_dir)
        config_base_name = config.get('config_base_name', 'postgresql')
        self._postgresql_conf = os.path.join(self._config_dir, config_base_name + '.conf')
        self._postgresql_conf_mtime = None
        self._postgresql_base_conf_name = config_base_name + '.base.conf'
        self._postgresql_base_conf = os.path.join(self._config_dir, self._postgresql_base_conf_name)
        self._pg_hba_conf = os.path.join(self._config_dir, 'pg_hba.conf')
        self._pg_ident_conf = os.path.join(self._config_dir, 'pg_ident.conf')
        self._recovery_conf = os.path.join(postgresql.data_dir, 'recovery.conf')
        self._recovery_conf_mtime = None
        self._recovery_signal = os.path.join(postgresql.data_dir, 'recovery.signal')
        self._standby_signal = os.path.join(postgresql.data_dir, 'standby.signal')
        self._auto_conf = os.path.join(postgresql.data_dir, 'postgresql.auto.conf')
        self._auto_conf_mtime = None
        self._pgpass = os.path.abspath(config.get('pgpass') or os.path.join(os.path.expanduser('~'), 'pgpass'))
        if os.path.exists(self._pgpass) and not os.path.isfile(self._pgpass):
            raise PatroniFatalException("'{0}' exists and it's not a file, check your `postgresql.pgpass` configuration"
                                        .format(self._pgpass))
        self._passfile = None
        self._passfile_mtime = None
        self._postmaster_ctime = None
        self._current_recovery_params: Optional[CaseInsensitiveDict] = None
        self._config = {}
        self._recovery_params = CaseInsensitiveDict()
        self._server_parameters: CaseInsensitiveDict = CaseInsensitiveDict()
        self.reload_config(config)

    def load_current_server_parameters(self) -> None:
        """Read GUC's values from ``pg_settings`` when Patroni is joining the postgres that is already running."""
        # parameters with _false_validator are owned by Patroni: keep our values
        exclude = [name.lower() for name, value in self.CMDLINE_OPTIONS.items() if value[1] == _false_validator]
        keep_values = {k: self._server_parameters[k] for k in exclude}
        server_parameters = CaseInsensitiveDict({r[0]: r[1] for r in self._postgresql.query(
            "SELECT name, pg_catalog.current_setting(name) FROM pg_catalog.pg_settings"
            " WHERE (source IN ('command line', 'environment variable') OR sourcefile = %s)"
            " AND pg_catalog.lower(name) != ALL(%s)", self._postgresql_conf, exclude)})
        recovery_params = CaseInsensitiveDict({k: server_parameters.pop(k) for k in self._RECOVERY_PARAMETERS
                                               if k in server_parameters})
        # We also want to load current settings of recovery parameters, including primary_conninfo
        # and primary_slot_name, otherwise patronictl restart will update postgresql.conf
        # and remove them, what in the worst case will cause another restart.
        # We are doing it only for PostgreSQL v12 onwards, because older version still have recovery.conf
        if not self._postgresql.is_primary() and self._postgresql.major_version >= 120000:
            # primary_conninfo is expected to be a dict, therefore we need to parse it
            recovery_params['primary_conninfo'] = parse_dsn(recovery_params.pop('primary_conninfo', '')) or {}
            self._recovery_params = recovery_params

        self._server_parameters = CaseInsensitiveDict({**server_parameters, **keep_values})

    def setup_server_parameters(self) -> None:
        self._server_parameters = self.get_server_parameters(self._config)
        self._adjust_recovery_parameters()

    def try_to_create_dir(self, d: str, msg: str) -> None:
        """Validate (and possibly create) directory *d*, resolved relative to PGDATA."""
        d = os.path.join(self._postgresql.data_dir, d)
        if (not is_subpath(self._postgresql.data_dir, d) or not self._postgresql.data_directory_empty()):
            validate_directory(d, msg)

    def check_directories(self) -> None:
        """Ensure directories referenced from the configuration exist and are writable."""
        if "unix_socket_directories" in self._server_parameters:
            for d in self._server_parameters["unix_socket_directories"].split(","):
                self.try_to_create_dir(d.strip(), "'{}' is defined in unix_socket_directories, {}")
        if "stats_temp_directory" in self._server_parameters:
            self.try_to_create_dir(self._server_parameters["stats_temp_directory"],
                                   "'{}' is defined in stats_temp_directory, {}")
        if not self._krbsrvname:
            # the pgpass file is not used when Kerberos auth is configured
            self.try_to_create_dir(os.path.dirname(self._pgpass),
                                   "'{}' is defined in `postgresql.pgpass`, {}")

    @property
    def config_dir(self) -> str:
        return self._config_dir

    @property
    def
_configuration_to_save(self) -> List[str]:
        # base names of the config files Patroni backs up into PGDATA
        configuration = [os.path.basename(self._postgresql_conf)]
        if 'custom_conf' not in self._config:
            configuration.append(os.path.basename(self._postgresql_base_conf_name))
        if not self.hba_file:
            configuration.append('pg_hba.conf')
        if not self.ident_file:
            configuration.append('pg_ident.conf')
        return configuration

    def set_file_permissions(self, filename: str) -> None:
        """Set permissions of file *filename* according to the expected permissions if it resides under PGDATA.

        .. note::
            Do nothing if the file is not under PGDATA.

        :param filename: path to a file which permissions might need to be adjusted.
        """
        if is_subpath(self._postgresql.data_dir, filename):
            pg_perm.set_permissions_from_data_directory(self._postgresql.data_dir)
            os.chmod(filename, pg_perm.file_create_mode)

    @contextmanager
    def config_writer(self, filename: str) -> Iterator[ConfigWriter]:
        """Create :class:`ConfigWriter` object and set permissions on a *filename*.

        :param filename: path to a config file.

        :yields: :class:`ConfigWriter` object.
        """
        with ConfigWriter(filename) as writer:
            yield writer
        self.set_file_permissions(filename)

    def save_configuration_files(self, check_custom_bootstrap: bool = False) -> bool:
        """
            copy postgresql.conf to postgresql.conf.backup to be able to retrieve configuration files
            - originally stored as symlinks, those are normally skipped by pg_basebackup
            - in case of WAL-E basebackup (see http://comments.gmane.org/gmane.comp.db.postgresql.wal-e/239)
        """
        if not (check_custom_bootstrap and self._postgresql.bootstrap.running_custom_bootstrap):
            try:
                for f in self._configuration_to_save:
                    config_file = os.path.join(self._config_dir, f)
                    backup_file = os.path.join(self._postgresql.data_dir, f + '.backup')
                    if os.path.isfile(config_file):
                        shutil.copy(config_file, backup_file)
                        self.set_file_permissions(backup_file)
            except IOError:
                # best effort: a failed backup must not abort bootstrap
                logger.exception('unable to create backup copies of configuration files')
        return True

    def restore_configuration_files(self) -> None:
        """ restore a previously saved postgresql.conf """
        try:
            for f in self._configuration_to_save:
                config_file = os.path.join(self._config_dir, f)
                backup_file = os.path.join(self._postgresql.data_dir, f + '.backup')
                if not os.path.isfile(config_file):
                    if os.path.isfile(backup_file):
                        shutil.copy(backup_file, config_file)
                        self.set_file_permissions(config_file)
                    # Previously we didn't backup pg_ident.conf, if file is missing just create empty
                    elif f == 'pg_ident.conf':
                        open(config_file, 'w').close()
                        self.set_file_permissions(config_file)
        except IOError:
            logger.exception('unable to restore configuration files from backup')

    def write_postgresql_conf(self, configuration: Optional[CaseInsensitiveDict] = None) -> None:
        # rename the original configuration if it is necessary
        if 'custom_conf' not in self._config and not os.path.exists(self._postgresql_base_conf):
            os.rename(self._postgresql_conf, self._postgresql_base_conf)

        configuration = configuration or self._server_parameters.copy()
        # Due to the permanent logical replication slots configured we
        # have to enable hot_standby_feedback
        if self._postgresql.enforce_hot_standby_feedback:
            configuration['hot_standby_feedback'] = 'on'

        with self.config_writer(self._postgresql_conf) as f:
            include = self._config.get('custom_conf') or self._postgresql_base_conf_name
            f.writeline("include '{0}'\n".format(ConfigWriter.escape(include)))
            for name, value in sorted((configuration).items()):
                value = transform_postgresql_parameter_value(self._postgresql.major_version, name, value,
                                                             self._postgresql.available_gucs)
                if value is not None and\
                        (name != 'hba_file' or not self._postgresql.bootstrap.running_custom_bootstrap):
                    f.write_param(name, value)
            # when we are doing custom bootstrap we assume that we don't know superuser password
            # and in order to be able to change it, we are opening trust access from a certain address
            # therefore we need to make sure that hba_file is not overridden
            # after changing superuser password we will "revert" all these "changes"
            if self._postgresql.bootstrap.running_custom_bootstrap or 'hba_file' not in self._server_parameters:
                f.write_param('hba_file', self._pg_hba_conf)
            if 'ident_file' not in self._server_parameters:
                f.write_param('ident_file', self._pg_ident_conf)

            if self._postgresql.major_version >= 120000:
                if self._recovery_params:
                    f.writeline('\n# recovery.conf')
                    self._write_recovery_params(f, self._recovery_params)
                if not self._postgresql.bootstrap.keep_existing_recovery_conf:
                    self._sanitize_auto_conf()

    def append_pg_hba(self, config: List[str]) -> bool:
        """Append *config* lines to pg_hba.conf unless an external hba_file is used."""
        if not self.hba_file and not self._config.get('pg_hba'):
            with open(self._pg_hba_conf, 'a') as f:
                f.write('\n{}\n'.format('\n'.join(config)))
            self.set_file_permissions(self._pg_hba_conf)
        return True

    def replace_pg_hba(self) -> Optional[bool]:
        """
        Replace pg_hba.conf content in the PGDATA if hba_file is not defined in the
        `postgresql.parameters` and pg_hba is defined in `postgresql` configuration section.

        :returns: True if pg_hba.conf was rewritten.
        """
        # when we are doing custom bootstrap we assume that we don't know superuser password
        # and in order to be able to change it, we are opening trust access from a certain address
        if self._postgresql.bootstrap.running_custom_bootstrap:
            addresses = {} if os.name == 'nt' else {'': 'local'}  # windows doesn't yet support unix-domain sockets
            if 'host' in self.local_replication_address and not self.local_replication_address['host'].startswith('/'):
                addresses.update({sa[0] + '/32': 'host' for _, _, _, _, sa in socket.getaddrinfo(
                                  self.local_replication_address['host'], self.local_replication_address['port'],
                                  0, socket.SOCK_STREAM, socket.IPPROTO_TCP)})

            with self.config_writer(self._pg_hba_conf) as f:
                for address, t in addresses.items():
                    f.writeline((
                        '{0}\treplication\t{1}\t{3}\ttrust\n'
                        '{0}\tall\t{2}\t{3}\ttrust'
                    ).format(t, self.replication['username'], self._superuser.get('username') or 'all', address))
        elif not self.hba_file and self._config.get('pg_hba'):
            with self.config_writer(self._pg_hba_conf) as f:
                f.writelines(self._config['pg_hba'])
            return True

    def replace_pg_ident(self) -> Optional[bool]:
        """
        Replace pg_ident.conf content in the PGDATA if ident_file is not defined in the
        `postgresql.parameters` and pg_ident is defined in the `postgresql` section.

        :returns: True if pg_ident.conf was rewritten.
        """
        if not self.ident_file and self._config.get('pg_ident'):
            with self.config_writer(self._pg_ident_conf) as f:
                f.writelines(self._config['pg_ident'])
            return True

    def primary_conninfo_params(self, member: Union[Leader, Member, None]) -> Optional[Dict[str, Any]]:
        """Build the primary_conninfo keyword dict for replicating from *member*.

        :returns: ``None`` when there is nothing to replicate from (no member,
                  no connection URL, or *member* is this very node).
        """
        if not member or not member.conn_url or member.name == self._postgresql.name:
            return None
        ret = member.conn_kwargs(self.replication)
        ret['application_name'] = self._postgresql.name
        ret.setdefault('sslmode', 'prefer')
        if self._postgresql.major_version >= 120000:
            ret.setdefault('gssencmode', 'prefer')
        if self._postgresql.major_version >= 130000:
            ret.setdefault('channel_binding', 'prefer')
        if self._krbsrvname:
            ret['krbsrvname'] = self._krbsrvname
        if 'dbname' in ret:
            del ret['dbname']
        return ret

    def format_dsn(self, params: Dict[str, Any], include_dbname: bool = False) -> str:
        # A list of keywords that can be found in a conninfo string. Follows what is acceptable by libpq
        keywords = ('dbname', 'user', 'passfile' if params.get('passfile') else 'password', 'host', 'port',
                    'sslmode', 'sslcompression', 'sslcert', 'sslkey', 'sslpassword', 'sslrootcert', 'sslcrl',
                    'sslcrldir', 'application_name', 'krbsrvname', 'gssencmode', 'channel_binding',
                    'target_session_attrs')
        if include_dbname:
            params = params.copy()
            if 'dbname' not in params:
                params['dbname'] = self._postgresql.database
            # we are abusing information about the necessity of dbname
            # dsn should contain passfile or password only if there is no dbname in it (it is used in recovery.conf)
            skip = {'passfile', 'password'}
        else:
            skip = {'dbname'}

        def escape(value: Any) -> str:
            # libpq conninfo values: escape quotes, backslashes and spaces
            return re.sub(r'([\'\\ ])', r'\\\1', str(value))

        return ' '.join('{0}={1}'.format(kw, escape(params[kw])) for kw in keywords
                        if kw not in skip and params.get(kw) is not None)

    def _write_recovery_params(self, fd: ConfigWriter, recovery_params: CaseInsensitiveDict) -> None:
        if self._postgresql.major_version >= 90500:
            pause_at_recovery_target =
parse_bool(recovery_params.pop('pause_at_recovery_target', None)) if pause_at_recovery_target is not None: recovery_params.setdefault('recovery_target_action', 'pause' if pause_at_recovery_target else 'promote') else: if str(recovery_params.pop('recovery_target_action', None)).lower() == 'promote': recovery_params.setdefault('pause_at_recovery_target', 'false') for name, value in sorted(recovery_params.items()): if name == 'primary_conninfo': if 'password' in value and self._postgresql.major_version >= 100000: self.write_pgpass(value) value['passfile'] = self._passfile = self._pgpass self._passfile_mtime = mtime(self._pgpass) value = self.format_dsn(value) else: value = transform_recovery_parameter_value(self._postgresql.major_version, name, value, self._postgresql.available_gucs) if value is None: continue fd.write_param(name, value) def build_recovery_params(self, member: Union[Leader, Member, None]) -> CaseInsensitiveDict: recovery_params = CaseInsensitiveDict({p: v for p, v in (self.get('recovery_conf') or {}).items() if not p.lower().startswith('recovery_target') and p.lower() not in ('primary_conninfo', 'primary_slot_name')}) recovery_params.update({'standby_mode': 'on', 'recovery_target_timeline': 'latest'}) if self._postgresql.major_version >= 120000: # on pg12 we want to protect from following params being set in one of included files # not doing so might result in a standby being paused, promoted or shutted down. 
recovery_params.update({'recovery_target': '', 'recovery_target_name': '', 'recovery_target_time': '', 'recovery_target_xid': '', 'recovery_target_lsn': ''}) is_remote_member = isinstance(member, RemoteMember) primary_conninfo = self.primary_conninfo_params(member) if primary_conninfo: use_slots = self.get('use_slots', True) and self._postgresql.major_version >= 90400 if use_slots and not (is_remote_member and member.no_replication_slot): primary_slot_name = member.primary_slot_name if is_remote_member else self._postgresql.name recovery_params['primary_slot_name'] = slot_name_from_member_name(primary_slot_name) # We are a standby leader and are using a replication slot. Make sure we connect to # the leader of the main cluster (in case more than one host is specified in the # connstr) by adding 'target_session_attrs=read-write' to primary_conninfo. if is_remote_member and 'target_sesions_attrs' not in primary_conninfo and\ self._postgresql.major_version >= 100000: primary_conninfo['target_session_attrs'] = 'read-write' recovery_params['primary_conninfo'] = primary_conninfo # standby_cluster config might have different parameters, we want to override them standby_cluster_params = ['restore_command', 'archive_cleanup_command']\ + (['recovery_min_apply_delay'] if is_remote_member else []) recovery_params.update({p: member.data.get(p) for p in standby_cluster_params if member and member.data.get(p)}) return recovery_params def recovery_conf_exists(self) -> bool: if self._postgresql.major_version >= 120000: return os.path.exists(self._standby_signal) or os.path.exists(self._recovery_signal) return os.path.exists(self._recovery_conf) @property def triggerfile_good_name(self) -> str: return 'trigger_file' if self._postgresql.major_version < 120000 else 'promote_trigger_file' @property def _triggerfile_wrong_name(self) -> str: return 'trigger_file' if self._postgresql.major_version >= 120000 else 'promote_trigger_file' @property def _recovery_parameters_to_compare(self) -> 
CaseInsensitiveSet:
        # recovery parameters that are deliberately excluded from comparison
        skip_params = CaseInsensitiveSet({'pause_at_recovery_target', 'recovery_target_inclusive',
                                          'recovery_target_action', 'standby_mode', self._triggerfile_wrong_name})
        return CaseInsensitiveSet(self._RECOVERY_PARAMETERS - skip_params)

    def _read_recovery_params(self) -> Tuple[Optional[CaseInsensitiveDict], bool]:
        """Read current recovery parameters values.

        .. note::
            We query Postgres only if we detected that Postgresql was restarted
            or when at least one of the following files was updated:

                * ``postgresql.conf``;
                * ``postgresql.auto.conf``;
                * ``passfile`` that is used in the ``primary_conninfo``.

        :returns: a tuple with two elements:

            * :class:`CaseInsensitiveDict` object with current values of recovery parameters,
              or ``None`` if no configuration files were updated;
            * ``True`` if new values of recovery parameters were queried, ``False`` otherwise.
        """
        if self._postgresql.is_starting():
            return None, False

        pg_conf_mtime = mtime(self._postgresql_conf)
        auto_conf_mtime = mtime(self._auto_conf)
        passfile_mtime = mtime(self._passfile) if self._passfile else False
        postmaster_ctime = self._postgresql.is_running()
        if postmaster_ctime:
            postmaster_ctime = postmaster_ctime.create_time()

        # nothing changed since the last check: reuse the cached values
        if self._postgresql_conf_mtime == pg_conf_mtime and self._auto_conf_mtime == auto_conf_mtime \
                and self._passfile_mtime == passfile_mtime and self._postmaster_ctime == postmaster_ctime:
            return None, False

        try:
            values = self._get_pg_settings(self._recovery_parameters_to_compare).values()
            # value, needs-restart (postmaster context), sourcefile
            values = CaseInsensitiveDict({p[0]: [p[1], p[4] == 'postmaster', p[5]] for p in values})
            self._postgresql_conf_mtime = pg_conf_mtime
            self._auto_conf_mtime = auto_conf_mtime
            self._postmaster_ctime = postmaster_ctime
        except Exception as exc:
            if all((isinstance(exc, PostgresConnectionException),
                    self._postgresql_conf_mtime == pg_conf_mtime,
                    self._auto_conf_mtime == auto_conf_mtime,
                    self._passfile_mtime == passfile_mtime,
                    self._postmaster_ctime != postmaster_ctime)):
                # We detected that the connection to postgres fails, but the process creation time of the postmaster
                # doesn't match the old value. It is an indicator that Postgres crashed and either doing crash
                # recovery or down. In this case we return values like nothing changed in the config.
                return None, False
            values = None
        return values, True

    def _read_recovery_params_pre_v12(self) -> Tuple[Optional[CaseInsensitiveDict], bool]:
        """Parse recovery.conf directly (PostgreSQL < 12 has no pg_settings entries for it)."""
        recovery_conf_mtime = mtime(self._recovery_conf)
        passfile_mtime = mtime(self._passfile) if self._passfile else False
        if recovery_conf_mtime == self._recovery_conf_mtime and passfile_mtime == self._passfile_mtime:
            return None, False

        values = CaseInsensitiveDict()
        with open(self._recovery_conf, 'r') as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                value = None
                match = PARAMETER_RE.match(line)
                if match:
                    value = read_recovery_param_value(line[match.end():])
                if match is None or value is None:
                    # unparsable file: report "changed" so a rewrite is triggered
                    return None, True
                values[match.group(1)] = [value, True]
            self._recovery_conf_mtime = recovery_conf_mtime
        values.setdefault('recovery_min_apply_delay', ['0', True])
        values['recovery_min_apply_delay'][0] = parse_int(values['recovery_min_apply_delay'][0], 'ms')
        # parameters missing from the file compare as empty strings
        values.update({param: ['', True] for param in self._recovery_parameters_to_compare if param not in values})
        return values, True

    def _check_passfile(self, passfile: str, wanted_primary_conninfo: Dict[str, Any]) -> bool:
        # If there is a passfile in the primary_conninfo try to figure out that
        # the passfile contains the line(s) allowing connection to the given node.
        # We assume that the passfile was created by Patroni and therefore doing
        # the full match and not covering cases when host, port or user are set to '*'
        passfile_mtime = mtime(passfile)
        if passfile_mtime:
            try:
                with open(passfile) as f:
                    wanted_lines = (self._pgpass_line(wanted_primary_conninfo) or '').splitlines()
                    file_lines = f.read().splitlines()
                    if set(wanted_lines) == set(file_lines):
                        self._passfile = passfile
                        self._passfile_mtime = passfile_mtime
                        return True
            except Exception:
                logger.info('Failed to read %s', passfile)
        return False

    def _check_primary_conninfo(self, primary_conninfo: Dict[str, Any],
                                wanted_primary_conninfo: Dict[str, Any]) -> bool:
        """Return ``True`` if the active primary_conninfo matches the desired one."""
        # first we will cover corner cases, when we are replicating from somewhere while shouldn't
        # or there is no primary_conninfo but we should replicate from some specific node.
        if not wanted_primary_conninfo:
            return not primary_conninfo
        elif not primary_conninfo:
            return False

        if not self._postgresql.is_starting():
            wal_receiver_primary_conninfo = self._postgresql.primary_conninfo()
            if wal_receiver_primary_conninfo:
                wal_receiver_primary_conninfo = parse_dsn(wal_receiver_primary_conninfo)
                # when wal receiver is alive use primary_conninfo from pg_stat_wal_receiver for comparison
                if wal_receiver_primary_conninfo:
                    primary_conninfo = wal_receiver_primary_conninfo
                    # There could be no password in the primary_conninfo or it is masked.
                    # Just copy the "desired" value in order to make comparison succeed.
                    if 'password' in wanted_primary_conninfo:
                        primary_conninfo['password'] = wanted_primary_conninfo['password']

        if 'passfile' in primary_conninfo and 'password' not in primary_conninfo \
                and 'password' in wanted_primary_conninfo:
            if self._check_passfile(primary_conninfo['passfile'], wanted_primary_conninfo):
                primary_conninfo['password'] = wanted_primary_conninfo['password']
            else:
                return False

        return all(str(primary_conninfo.get(p)) == str(v) for p, v in wanted_primary_conninfo.items() if v is not None)

    def check_recovery_conf(self, member: Union[Leader, Member, None]) -> Tuple[bool, bool]:
        """Returns a tuple. The first boolean element indicates that recovery params don't match
           and the second is set to `True` if the restart is required in order to apply new values"""
        # TODO: recovery.conf could be stale, would be nice to detect that.
        if self._postgresql.major_version >= 120000:
            if not os.path.exists(self._standby_signal):
                return True, True
            _read_recovery_params = self._read_recovery_params
        else:
            if not self.recovery_conf_exists():
                return True, True
            _read_recovery_params = self._read_recovery_params_pre_v12

        params, updated = _read_recovery_params()
        # updated indicates that mtime of postgresql.conf, postgresql.auto.conf, or recovery.conf
        # was changed and params were read either from the config or from the database connection.
        if updated:
            if params is None:  # exception or unparsable config
                return True, True

            # We will cache parsed value until the next config change.
            self._current_recovery_params = params
            primary_conninfo = params['primary_conninfo']
            if primary_conninfo[0]:
                primary_conninfo[0] = parse_dsn(params['primary_conninfo'][0])
                # If we failed to parse non-empty connection string this indicates that config is broken.
                if not primary_conninfo[0]:
                    return True, True
            else:  # empty string, primary_conninfo is not in the config
                primary_conninfo[0] = {}

        if not self._postgresql.is_starting() and self._current_recovery_params:
            # when wal receiver is alive take primary_slot_name from pg_stat_wal_receiver
            wal_receiver_primary_slot_name = self._postgresql.primary_slot_name()
            if not wal_receiver_primary_slot_name and self._postgresql.primary_conninfo():
                wal_receiver_primary_slot_name = ''
            if wal_receiver_primary_slot_name is not None:
                self._current_recovery_params['primary_slot_name'][0] = wal_receiver_primary_slot_name

        # Increment the 'reload' to enforce write of postgresql.conf when joining the running postgres
        required = {'restart': 0, 'reload': int(self._postgresql.major_version >= 120000
                                                and not self._postgresql.cb_called
                                                and not self._postgresql.is_starting())}

        def record_missmatch(mtype: bool) -> None:
            # mtype is True when changing the parameter requires a restart
            required['restart' if mtype else 'reload'] += 1

        wanted_recovery_params = self.build_recovery_params(member)
        for param, value in (self._current_recovery_params or {}).items():
            # Skip certain parameters defined in the included postgres config files
            # if we know that they are not specified in the patroni configuration.
            if len(value) > 2 and value[2] not in (self._postgresql_conf, self._auto_conf) and \
                    param in ('archive_cleanup_command', 'promote_trigger_file', 'recovery_end_command',
                              'recovery_min_apply_delay', 'restore_command') and \
                    param not in wanted_recovery_params:
                continue
            if param == 'recovery_min_apply_delay':
                if not compare_values('integer', 'ms', value[0], wanted_recovery_params.get(param, 0)):
                    record_missmatch(value[1])
            elif param == 'standby_mode':
                if not compare_values('bool', None, value[0], wanted_recovery_params.get(param, 'on')):
                    record_missmatch(value[1])
            elif param == 'primary_conninfo':
                if not self._check_primary_conninfo(value[0], wanted_recovery_params.get('primary_conninfo', {})):
                    record_missmatch(value[1])
            elif (param != 'primary_slot_name' or wanted_recovery_params.get('primary_conninfo')) \
                    and str(value[0]) != str(wanted_recovery_params.get(param, '')):
                record_missmatch(value[1])
        return required['restart'] + required['reload'] > 0, required['restart'] > 0

    @staticmethod
    def _remove_file_if_exists(name: str) -> None:
        """Unlink *name* if it is a regular file or symlink."""
        if os.path.isfile(name) or os.path.islink(name):
            os.unlink(name)

    @staticmethod
    def _pgpass_line(record: Dict[str, Any]) -> Optional[str]:
        """Format pgpass line(s) for *record*; ``None`` when there is no password."""
        if 'password' in record:
            def escape(value: Any) -> str:
                # pgpass requires ':' and '\' to be backslash-escaped
                return re.sub(r'([:\\])', r'\\\1', str(value))

            record = {n: escape(record.get(n) or '*') for n in ('host', 'port', 'user', 'password')}
            # 'host' could be several comma-separated hostnames, in this case
            # we need to write one pgpass line per host
            line = ''
            for hostname in record['host'].split(','):
                line += hostname + ':{port}:*:{user}:{password}'.format(**record) + '\n'
            return line.rstrip()

    def write_pgpass(self, record: Dict[str, Any]) -> Dict[str, str]:
        """Write the pgpass file for *record* and return env with ``PGPASSFILE`` set."""
        line = self._pgpass_line(record)
        if not line:
            return os.environ.copy()

        with open(self._pgpass, 'w') as f:
            # restrict permissions before writing the password
            os.chmod(self._pgpass, stat.S_IWRITE | stat.S_IREAD)
            f.write(line)

        return {**os.environ, 'PGPASSFILE': self._pgpass}

    def write_recovery_conf(self, recovery_params: CaseInsensitiveDict) -> None:
self._recovery_params = recovery_params if self._postgresql.major_version >= 120000: if parse_bool(recovery_params.pop('standby_mode', None)): open(self._standby_signal, 'w').close() self.set_file_permissions(self._standby_signal) else: self._remove_file_if_exists(self._standby_signal) open(self._recovery_signal, 'w').close() self.set_file_permissions(self._recovery_signal) def restart_required(name: str) -> bool: if self._postgresql.major_version >= 140000: return False return name == 'restore_command' or (self._postgresql.major_version < 130000 and name in ('primary_conninfo', 'primary_slot_name')) self._current_recovery_params = CaseInsensitiveDict({n: [v, restart_required(n), self._postgresql_conf] for n, v in recovery_params.items()}) else: with self.config_writer(self._recovery_conf) as f: self._write_recovery_params(f, recovery_params) def remove_recovery_conf(self) -> None: for name in (self._recovery_conf, self._standby_signal, self._recovery_signal): self._remove_file_if_exists(name) self._recovery_params = CaseInsensitiveDict() self._current_recovery_params = None def _sanitize_auto_conf(self) -> None: overwrite = False lines: List[str] = [] if os.path.exists(self._auto_conf): try: with open(self._auto_conf) as f: for raw_line in f: line = raw_line.strip() match = PARAMETER_RE.match(line) if match and match.group(1).lower() in self._RECOVERY_PARAMETERS: overwrite = True else: lines.append(raw_line) except Exception: logger.info('Failed to read %s', self._auto_conf) if overwrite: try: with open(self._auto_conf, 'w') as f: self.set_file_permissions(self._auto_conf) for raw_line in lines: f.write(raw_line) except Exception: logger.exception('Failed to remove some unwanted parameters from %s', self._auto_conf) def _adjust_recovery_parameters(self) -> None: # It is not strictly necessary, but we can make patroni configs crossi-compatible with all postgres versions. 
recovery_conf = {n: v for n, v in self._server_parameters.items() if n.lower() in self._RECOVERY_PARAMETERS} if recovery_conf: self._config['recovery_conf'] = recovery_conf if self.get('recovery_conf'): value = self._config['recovery_conf'].pop(self._triggerfile_wrong_name, None) if self.triggerfile_good_name not in self._config['recovery_conf'] and value: self._config['recovery_conf'][self.triggerfile_good_name] = value def get_server_parameters(self, config: Dict[str, Any]) -> CaseInsensitiveDict: parameters = config['parameters'].copy() listen_addresses, port = split_host_port(config['listen'], 5432) parameters.update(cluster_name=self._postgresql.scope, listen_addresses=listen_addresses, port=str(port)) if not self._postgresql.global_config or self._postgresql.global_config.is_synchronous_mode: synchronous_standby_names = self._server_parameters.get('synchronous_standby_names') if synchronous_standby_names is None: if self._postgresql.global_config and self._postgresql.global_config.is_synchronous_mode_strict\ and self._postgresql.role in ('master', 'primary', 'promoted'): parameters['synchronous_standby_names'] = '*' else: parameters.pop('synchronous_standby_names', None) else: parameters['synchronous_standby_names'] = synchronous_standby_names # Handle hot_standby <-> replica rename if parameters.get('wal_level') == ('hot_standby' if self._postgresql.major_version >= 90600 else 'replica'): parameters['wal_level'] = 'replica' if self._postgresql.major_version >= 90600 else 'hot_standby' # Try to recalcualte wal_keep_segments <-> wal_keep_size assuming that typical wal_segment_size is 16MB. # The real segment size could be estimated from pg_control, but we don't really care, because the only goal of # this exercise is improving cross version compatibility and user must set the correct parameter in the config. 
if self._postgresql.major_version >= 130000: wal_keep_segments = parameters.pop('wal_keep_segments', self.CMDLINE_OPTIONS['wal_keep_segments'][0]) parameters.setdefault('wal_keep_size', str(int(wal_keep_segments) * 16) + 'MB') elif self._postgresql.major_version: wal_keep_size = parse_int(parameters.pop('wal_keep_size', self.CMDLINE_OPTIONS['wal_keep_size'][0]), 'MB') parameters.setdefault('wal_keep_segments', int(((wal_keep_size or 0) + 8) / 16)) self._postgresql.citus_handler.adjust_postgres_gucs(parameters) ret = CaseInsensitiveDict({k: v for k, v in parameters.items() if not self._postgresql.major_version or self._postgresql.major_version >= self.CMDLINE_OPTIONS.get(k, (0, 1, 90100))[2]}) ret.update({k: os.path.join(self._config_dir, ret[k]) for k in ('hba_file', 'ident_file') if k in ret}) return ret @staticmethod def _get_unix_local_address(unix_socket_directories: str) -> str: for d in unix_socket_directories.split(','): d = d.strip() if d.startswith('/'): # Only absolute path can be used to connect via unix-socket return d return '' def _get_tcp_local_address(self) -> str: listen_addresses = self._server_parameters['listen_addresses'].split(',') for la in listen_addresses: if la.strip().lower() in ('*', '0.0.0.0', '127.0.0.1', 'localhost'): # we are listening on '*' or localhost return 'localhost' # connection via localhost is preferred return listen_addresses[0].strip() # can't use localhost, take first address from listen_addresses def resolve_connection_addresses(self) -> None: """Calculates and sets local and remote connection urls and options. This method sets: * :attr:`Postgresql.connection_string ` attribute, which is later written to the member key in DCS as ``conn_url``. * :attr:`ConfigHandler.local_replication_address` attribute, which is used for replication connections to local postgres. * :attr:`ConnectionPool.conn_kwargs ` attribute, which is used for superuser connections to local postgres. .. 
note:: If there is a valid directory in ``postgresql.parameters.unix_socket_directories`` in the Patroni configuration and ``postgresql.use_unix_socket`` and/or ``postgresql.use_unix_socket_repl`` are set to ``True``, we respectively use unix sockets for superuser and replication connections to local postgres. If there is a requirement to use unix sockets, but nothing is set in the ``postgresql.parameters.unix_socket_directories``, we omit a ``host`` in connection parameters relying on the ability of ``libpq`` to connect via some default unix socket directory. If unix sockets are not requested we "switch" to TCP, prefering to use ``localhost`` if it is possible to deduce that Postgres is listening on a local interface address. Otherwise we just used the first address specified in the ``listen_addresses`` GUC. """ port = self._server_parameters['port'] tcp_local_address = self._get_tcp_local_address() netloc = self._config.get('connect_address') or tcp_local_address + ':' + port unix_local_address = {'port': port} unix_socket_directories = self._server_parameters.get('unix_socket_directories') if unix_socket_directories is not None: # fallback to tcp if unix_socket_directories is set, but there are no suitable values unix_local_address['host'] = self._get_unix_local_address(unix_socket_directories) or tcp_local_address tcp_local_address = {'host': tcp_local_address, 'port': port} self.local_replication_address = unix_local_address\ if self._config.get('use_unix_socket_repl') else tcp_local_address self._postgresql.connection_string = uri('postgres', netloc, self._postgresql.database) local_address = unix_local_address if self._config.get('use_unix_socket') else tcp_local_address local_conn_kwargs = { **local_address, **self._superuser, 'dbname': self._postgresql.database, 'fallback_application_name': 'Patroni', 'connect_timeout': 3, 'options': '-c statement_timeout=2000' } # if the "username" parameter is present, it actually needs to be "user" for connecting to 
PostgreSQL if 'username' in local_conn_kwargs: local_conn_kwargs['user'] = local_conn_kwargs.pop('username') # "notify" connection_pool about the "new" local connection address self._postgresql.connection_pool.conn_kwargs = local_conn_kwargs def _get_pg_settings(self, names: Collection[str]) -> Dict[Any, Tuple[Any, ...]]: return {r[0]: r for r in self._postgresql.query(('SELECT name, setting, unit, vartype, context, sourcefile' + ' FROM pg_catalog.pg_settings ' + ' WHERE pg_catalog.lower(name) = ANY(%s)'), [n.lower() for n in names])} @staticmethod def _handle_wal_buffers(old_values: Dict[Any, Tuple[Any, ...]], changes: CaseInsensitiveDict) -> None: wal_block_size = parse_int(old_values['wal_block_size'][1]) or 8192 wal_segment_size = old_values['wal_segment_size'] wal_segment_unit = parse_int(wal_segment_size[2], 'B') or 8192 \ if wal_segment_size[2] is not None and wal_segment_size[2][0].isdigit() else 1 wal_segment_size = parse_int(wal_segment_size[1]) or (16777216 if wal_segment_size[2] is None else 2048) wal_segment_size *= wal_segment_unit / wal_block_size default_wal_buffers = min(max((parse_int(old_values['shared_buffers'][1]) or 16384) / 32, 8), wal_segment_size) wal_buffers = old_values['wal_buffers'] new_value = str(changes['wal_buffers'] or -1) new_value = default_wal_buffers if new_value == '-1' else parse_int(new_value, wal_buffers[2]) old_value = default_wal_buffers if wal_buffers[1] == '-1' else parse_int(*wal_buffers[1:3]) if new_value == old_value: del changes['wal_buffers'] def reload_config(self, config: Dict[str, Any], sighup: bool = False) -> None: self._superuser = config['authentication'].get('superuser', {}) server_parameters = self.get_server_parameters(config) params_skip_changes = CaseInsensitiveSet((*self._RECOVERY_PARAMETERS, 'hot_standby', 'wal_log_hints')) conf_changed = hba_changed = ident_changed = local_connection_address_changed = pending_restart = False if self._postgresql.state == 'running': changes = CaseInsensitiveDict({p: v 
for p, v in server_parameters.items() if p not in params_skip_changes}) changes.update({p: None for p in self._server_parameters.keys() if not (p in changes or p in params_skip_changes)}) if changes: undef = [] if 'wal_buffers' in changes: # we need to calculate the default value of wal_buffers undef = [p for p in ('shared_buffers', 'wal_segment_size', 'wal_block_size') if p not in changes] changes.update({p: None for p in undef}) # XXX: query can raise an exception old_values = self._get_pg_settings(changes.keys()) if 'wal_buffers' in changes: self._handle_wal_buffers(old_values, changes) for p in undef: del changes[p] for r in old_values.values(): if r[4] != 'internal' and r[0] in changes: new_value = changes.pop(r[0]) if new_value is None or not compare_values(r[3], r[2], r[1], new_value): conf_changed = True if r[4] == 'postmaster': pending_restart = True logger.info('Changed %s from %s to %s (restart might be required)', r[0], r[1], new_value) if config.get('use_unix_socket') and r[0] == 'unix_socket_directories'\ or r[0] in ('listen_addresses', 'port'): local_connection_address_changed = True else: logger.info('Changed %s from %s to %s', r[0], r[1], new_value) elif r[0] in self._server_parameters \ and not compare_values(r[3], r[2], r[1], self._server_parameters[r[0]]): # Check if any parameter was set back to the current pg_settings value # We can use pg_settings value here, as it is proved to be equal to new_value logger.info('Changed %s from %s to %s', r[0], self._server_parameters[r[0]], r[1]) conf_changed = True for param, value in changes.items(): if '.' 
in param: # Check that user-defined-paramters have changed (parameters with period in name) if value is None or param not in self._server_parameters \ or str(value) != str(self._server_parameters[param]): logger.info('Changed %s from %s to %s', param, self._server_parameters.get(param), value) conf_changed = True elif param in server_parameters: logger.warning('Removing invalid parameter `%s` from postgresql.parameters', param) server_parameters.pop(param) if (not server_parameters.get('hba_file') or server_parameters['hba_file'] == self._pg_hba_conf) \ and config.get('pg_hba'): hba_changed = self._config.get('pg_hba', []) != config['pg_hba'] if (not server_parameters.get('ident_file') or server_parameters['ident_file'] == self._pg_hba_conf) \ and config.get('pg_ident'): ident_changed = self._config.get('pg_ident', []) != config['pg_ident'] self._config = config self._postgresql.set_pending_restart(pending_restart) self._server_parameters = server_parameters self._adjust_recovery_parameters() self._krbsrvname = config.get('krbsrvname') # for not so obvious connection attempts that may happen outside of pyscopg2 if self._krbsrvname: os.environ['PGKRBSRVNAME'] = self._krbsrvname if not local_connection_address_changed: self.resolve_connection_addresses() proxy_addr = config.get('proxy_address') self._postgresql.proxy_url = uri('postgres', proxy_addr, self._postgresql.database) if proxy_addr else None if conf_changed: self.write_postgresql_conf() if hba_changed: self.replace_pg_hba() if ident_changed: self.replace_pg_ident() if sighup or conf_changed or hba_changed or ident_changed: logger.info('Reloading PostgreSQL configuration.') self._postgresql.reload() if self._postgresql.major_version >= 90500: time.sleep(1) try: pending_restart = self._postgresql.query( 'SELECT COUNT(*) FROM pg_catalog.pg_settings' ' WHERE pg_catalog.lower(name) != ALL(%s) AND pending_restart', [n.lower() for n in params_skip_changes])[0][0] > 0 
self._postgresql.set_pending_restart(pending_restart) except Exception as e: logger.warning('Exception %r when running query', e) else: logger.info('No PostgreSQL configuration items changed, nothing to reload.') def set_synchronous_standby_names(self, value: Optional[str]) -> Optional[bool]: """Updates synchronous_standby_names and reloads if necessary. :returns: True if value was updated.""" if value != self._server_parameters.get('synchronous_standby_names'): if value is None: self._server_parameters.pop('synchronous_standby_names', None) else: self._server_parameters['synchronous_standby_names'] = value if self._postgresql.state == 'running': self.write_postgresql_conf() self._postgresql.reload() return True @property def effective_configuration(self) -> CaseInsensitiveDict: """It might happen that the current value of one (or more) below parameters stored in the controldata is higher than the value stored in the global cluster configuration. Example: max_connections in global configuration is 100, but in controldata `Current max_connections setting: 200`. If we try to start postgres with max_connections=100, it will immediately exit. 
As a workaround we will start it with the values from controldata and set `pending_restart` to true as an indicator that current values of parameters are not matching expectations.""" if self._postgresql.role in ('master', 'primary'): return self._server_parameters options_mapping = { 'max_connections': 'max_connections setting', 'max_prepared_transactions': 'max_prepared_xacts setting', 'max_locks_per_transaction': 'max_locks_per_xact setting' } if self._postgresql.major_version >= 90400: options_mapping['max_worker_processes'] = 'max_worker_processes setting' if self._postgresql.major_version >= 120000: options_mapping['max_wal_senders'] = 'max_wal_senders setting' data = self._postgresql.controldata() effective_configuration = self._server_parameters.copy() for name, cname in options_mapping.items(): value = parse_int(effective_configuration[name]) if cname not in data: logger.warning('%s is missing from pg_controldata output', cname) continue cvalue = parse_int(data[cname]) if cvalue is not None and value is not None and cvalue > value: effective_configuration[name] = cvalue self._postgresql.set_pending_restart(True) # If we are using custom bootstrap with PITR it could fail when values like max_connections # are increased, therefore we disable hot_standby if recovery_target_action == 'promote'. 
if self._postgresql.bootstrap.running_custom_bootstrap: disable_hot_standby = False if self._postgresql.bootstrap.keep_existing_recovery_conf: disable_hot_standby = True # trust that pgBackRest does the right thing # `pause_at_recovery_target` has no effect if hot_standby is not enabled, therefore we consider only 9.5+ elif self._postgresql.major_version >= 90500 and self._recovery_params: pause_at_recovery_target = parse_bool(self._recovery_params.get('pause_at_recovery_target')) recovery_target_action = self._recovery_params.get( 'recovery_target_action', 'promote' if pause_at_recovery_target is False else 'pause') disable_hot_standby = recovery_target_action == 'promote' if disable_hot_standby: effective_configuration['hot_standby'] = 'off' return effective_configuration @property def replication(self) -> Dict[str, Any]: return self._config['authentication']['replication'] @property def superuser(self) -> Dict[str, Any]: return self._superuser @property def rewind_credentials(self) -> Dict[str, Any]: return self._config['authentication'].get('rewind', self._superuser) \ if self._postgresql.major_version >= 110000 else self._superuser @property def ident_file(self) -> Optional[str]: ident_file = self._server_parameters.get('ident_file') return None if ident_file == self._pg_ident_conf else ident_file @property def hba_file(self) -> Optional[str]: hba_file = self._server_parameters.get('hba_file') return None if hba_file == self._pg_hba_conf else hba_file @property def pg_hba_conf(self) -> str: return self._pg_hba_conf @property def postgresql_conf(self) -> str: return self._postgresql_conf def get(self, key: str, default: Optional[Any] = None) -> Optional[Any]: return self._config.get(key, default) def restore_command(self) -> Optional[str]: return (self.get('recovery_conf') or {}).get('restore_command') patroni-3.2.2/patroni/postgresql/connection.py000066400000000000000000000146151455170150700215760ustar00rootroot00000000000000import logging from contextlib 
import contextmanager from threading import Lock from typing import Any, Dict, Iterator, List, Optional, Union, Tuple, TYPE_CHECKING if TYPE_CHECKING: # pragma: no cover from psycopg import Connection, Cursor from psycopg2 import connection, cursor from .. import psycopg from ..exceptions import PostgresConnectionException logger = logging.getLogger(__name__) class NamedConnection: """Helper class to manage ``psycopg`` connections from Patroni to PostgreSQL. :ivar server_version: PostgreSQL version in integer format where we are connected to. """ server_version: int def __init__(self, pool: 'ConnectionPool', name: str, kwargs_override: Optional[Dict[str, Any]]) -> None: """Create an instance of :class:`NamedConnection` class. :param pool: reference to a :class:`ConnectionPool` object. :param name: name of the connection. :param kwargs_override: :class:`dict` object with connection parameters that should be different from default values provided by connection *pool*. """ self._pool = pool self._name = name self._kwargs_override = kwargs_override or {} self._lock = Lock() # used to make sure that only one connection to postgres is established self._connection = None @property def _conn_kwargs(self) -> Dict[str, Any]: """Connection parameters for this :class:`NamedConnection`.""" return {**self._pool.conn_kwargs, **self._kwargs_override, 'application_name': f'Patroni {self._name}'} def get(self) -> Union['connection', 'Connection[Any]']: """Get ``psycopg``/``psycopg2`` connection object. .. note:: Opens a new connection if necessary. :returns: ``psycopg`` or ``psycopg2`` connection object. 
""" with self._lock: if not self._connection or self._connection.closed != 0: logger.info("establishing a new patroni %s connection to postgres", self._name) self._connection = psycopg.connect(**self._conn_kwargs) self.server_version = getattr(self._connection, 'server_version', 0) return self._connection def query(self, sql: str, *params: Any) -> List[Tuple[Any, ...]]: """Execute a query with parameters and optionally returns a response. :param sql: SQL statement to execute. :param params: parameters to pass. :returns: a query response as a list of tuples if there is any. :raises: :exc:`~psycopg.Error` if had issues while executing *sql*. :exc:`~patroni.exceptions.PostgresConnectionException`: if had issues while connecting to the database. """ cursor = None try: with self.get().cursor() as cursor: cursor.execute(sql.encode('utf-8'), params or None) return cursor.fetchall() if cursor.rowcount and cursor.rowcount > 0 else [] except psycopg.Error as exc: if cursor and cursor.connection.closed == 0: # When connected via unix socket, psycopg2 can't recoginze 'connection lost' and leaves # `self._connection.closed == 0`, but the generic exception is raised. It doesn't make # sense to continue with existing connection and we will close it, to avoid its reuse. if type(exc) in (psycopg.DatabaseError, psycopg.OperationalError): self.close() else: raise exc raise PostgresConnectionException('connection problems') from exc def close(self, silent: bool = False) -> bool: """Close the psycopg connection to postgres. :param silent: whether the method should not write logs. :returns: ``True`` if ``psycopg`` connection was closed, ``False`` otherwise.`` """ ret = False if self._connection and self._connection.closed == 0: self._connection.close() if not silent: logger.info("closed patroni %s connection to postgres", self._name) ret = True self._connection = None return ret class ConnectionPool: """Helper class to manage named connections from Patroni to PostgreSQL. 
The instance keeps named :class:`NamedConnection` objects and parameters that must be used for new connections. """ def __init__(self) -> None: """Create an instance of :class:`ConnectionPool` class.""" self._lock = Lock() self._connections: Dict[str, NamedConnection] = {} self._conn_kwargs: Dict[str, Any] = {} @property def conn_kwargs(self) -> Dict[str, Any]: """Connection parameters that must be used for new ``psycopg`` connections.""" with self._lock: return self._conn_kwargs.copy() @conn_kwargs.setter def conn_kwargs(self, value: Dict[str, Any]) -> None: """Set new connection parameters. :param value: :class:`dict` object with connection parameters. """ with self._lock: self._conn_kwargs = value def get(self, name: str, kwargs_override: Optional[Dict[str, Any]] = None) -> NamedConnection: """Get a new named :class:`NamedConnection` object from the pool. .. note:: Creates a new :class:`NamedConnection` object if it doesn't yet exist in the pool. :param name: name of the connection. :param kwargs_override: :class:`dict` object with connection parameters that should be different from default values provided by :attr:`conn_kwargs`. :returns: :class:`NamedConnection` object. 
""" with self._lock: if name not in self._connections: self._connections[name] = NamedConnection(self, name, kwargs_override) return self._connections[name] def close(self) -> None: """Close all named connections from Patroni to PostgreSQL registered in the pool.""" with self._lock: closed_connections = [conn.close(True) for conn in self._connections.values()] if any(closed_connections): logger.info("closed patroni connections to postgres") @contextmanager def get_connection_cursor(**kwargs: Any) -> Iterator[Union['cursor', 'Cursor[Any]']]: conn = psycopg.connect(**kwargs) with conn.cursor() as cur: yield cur conn.close() patroni-3.2.2/patroni/postgresql/misc.py000066400000000000000000000055601455170150700203710ustar00rootroot00000000000000import errno import logging import os from typing import Iterable, Tuple from ..exceptions import PostgresException logger = logging.getLogger(__name__) def postgres_version_to_int(pg_version: str) -> int: """Convert the server_version to integer >>> postgres_version_to_int('9.5.3') 90503 >>> postgres_version_to_int('9.3.13') 90313 >>> postgres_version_to_int('10.1') 100001 >>> postgres_version_to_int('10') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... PostgresException: 'Invalid PostgreSQL version format: X.Y or X.Y.Z is accepted: 10' >>> postgres_version_to_int('9.6') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... PostgresException: 'Invalid PostgreSQL version format: X.Y or X.Y.Z is accepted: 9.6' >>> postgres_version_to_int('a.b.c') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... 
PostgresException: 'Invalid PostgreSQL version: a.b.c' """ try: components = list(map(int, pg_version.split('.'))) except ValueError: raise PostgresException('Invalid PostgreSQL version: {0}'.format(pg_version)) if len(components) < 2 or len(components) == 2 and components[0] < 10 or len(components) > 3: raise PostgresException('Invalid PostgreSQL version format: X.Y or X.Y.Z is accepted: {0}'.format(pg_version)) if len(components) == 2: # new style version numbers, i.e. 10.1 becomes 100001 components.insert(1, 0) return int(''.join('{0:02d}'.format(c) for c in components)) def postgres_major_version_to_int(pg_version: str) -> int: """ >>> postgres_major_version_to_int('10') 100000 >>> postgres_major_version_to_int('9.6') 90600 """ return postgres_version_to_int(pg_version + '.0') def parse_lsn(lsn: str) -> int: t = lsn.split('/') return int(t[0], 16) * 0x100000000 + int(t[1], 16) def parse_history(data: str) -> Iterable[Tuple[int, int, str]]: for line in data.split('\n'): values = line.strip().split('\t') if len(values) == 3: try: yield int(values[0]), parse_lsn(values[1]), values[2] except (IndexError, ValueError): logger.exception('Exception when parsing timeline history line "%s"', values) def format_lsn(lsn: int, full: bool = False) -> str: template = '{0:X}/{1:08X}' if full else '{0:X}/{1:X}' return template.format(lsn >> 32, lsn & 0xFFFFFFFF) def fsync_dir(path: str) -> None: if os.name != 'nt': fd = os.open(path, os.O_DIRECTORY) try: os.fsync(fd) except OSError as e: # Some filesystems don't like fsyncing directories and raise EINVAL. Ignoring it is usually safe. 
if e.errno != errno.EINVAL: raise finally: os.close(fd) patroni-3.2.2/patroni/postgresql/postmaster.py000066400000000000000000000255351455170150700216430ustar00rootroot00000000000000import logging import multiprocessing import os import psutil import re import signal import subprocess import sys from multiprocessing.connection import Connection from typing import Dict, Optional, List from patroni import PATRONI_ENV_PREFIX, KUBERNETES_ENV_PREFIX # avoid spawning the resource tracker process if sys.version_info >= (3, 8): # pragma: no cover import multiprocessing.resource_tracker multiprocessing.resource_tracker.getfd = lambda: 0 elif sys.version_info >= (3, 4): # pragma: no cover import multiprocessing.semaphore_tracker multiprocessing.semaphore_tracker.getfd = lambda: 0 logger = logging.getLogger(__name__) STOP_SIGNALS = { 'smart': 'TERM', 'fast': 'INT', 'immediate': 'QUIT', } def pg_ctl_start(conn: Connection, cmdline: List[str], env: Dict[str, str]) -> None: if os.name != 'nt': os.setsid() try: postmaster = subprocess.Popen(cmdline, close_fds=True, env=env) conn.send(postmaster.pid) except Exception: logger.exception('Failed to execute %s', cmdline) conn.send(None) conn.close() class PostmasterProcess(psutil.Process): def __init__(self, pid: int) -> None: self._postmaster_pid: Dict[str, str] self.is_single_user = False if pid < 0: pid = -pid self.is_single_user = True super(PostmasterProcess, self).__init__(pid) @staticmethod def _read_postmaster_pidfile(data_dir: str) -> Dict[str, str]: """Reads and parses postmaster.pid from the data directory :returns dictionary of values if successful, empty dictionary otherwise """ pid_line_names = ['pid', 'data_dir', 'start_time', 'port', 'socket_dir', 'listen_addr', 'shmem_key'] try: with open(os.path.join(data_dir, 'postmaster.pid')) as f: return {name: line.rstrip('\n') for name, line in zip(pid_line_names, f)} except IOError: return {} def _is_postmaster_process(self) -> bool: try: start_time = 
int(self._postmaster_pid.get('start_time', 0)) if start_time and abs(self.create_time() - start_time) > 3: logger.info('Process %s is not postmaster, too much difference between PID file start time %s and ' 'process start time %s', self.pid, self.create_time(), start_time) return False except ValueError: logger.warning('Garbage start time value in pid file: %r', self._postmaster_pid.get('start_time')) # Extra safety check. The process can't be ourselves, our parent or our direct child. if self.pid == os.getpid() or self.pid == os.getppid() or self.ppid() == os.getpid(): logger.info('Patroni (pid=%s, ppid=%s), "fake postmaster" (pid=%s, ppid=%s)', os.getpid(), os.getppid(), self.pid, self.ppid()) return False return True @classmethod def _from_pidfile(cls, data_dir: str) -> Optional['PostmasterProcess']: postmaster_pid = PostmasterProcess._read_postmaster_pidfile(data_dir) try: pid = int(postmaster_pid.get('pid', 0)) if pid: proc = cls(pid) proc._postmaster_pid = postmaster_pid return proc except ValueError: return None @staticmethod def from_pidfile(data_dir: str) -> Optional['PostmasterProcess']: try: proc = PostmasterProcess._from_pidfile(data_dir) return proc if proc and proc._is_postmaster_process() else None except psutil.NoSuchProcess: return None @classmethod def from_pid(cls, pid: int) -> Optional['PostmasterProcess']: try: return cls(pid) except psutil.NoSuchProcess: return None def signal_kill(self) -> bool: """to suspend and kill postmaster and all children :returns True if postmaster and children are killed, False if error """ try: self.suspend() except psutil.NoSuchProcess: return True except psutil.Error as e: logger.warning('Failed to suspend postmaster: %s', e) try: children = self.children(recursive=True) except psutil.NoSuchProcess: return True except psutil.Error as e: logger.warning('Failed to get a list of postmaster children: %s', e) children = [] try: self.kill() except psutil.NoSuchProcess: return True except psutil.Error as e: 
logger.warning('Could not kill postmaster: %s', e) return False for child in children: try: child.kill() except psutil.Error: pass psutil.wait_procs(children + [self]) return True def signal_stop(self, mode: str, pg_ctl: str = 'pg_ctl') -> Optional[bool]: """Signal postmaster process to stop :returns None if signaled, True if process is already gone, False if error """ if self.is_single_user: logger.warning("Cannot stop server; single-user server is running (PID: {0})".format(self.pid)) return False if os.name != 'posix': return self.pg_ctl_kill(mode, pg_ctl) try: self.send_signal(getattr(signal, 'SIG' + STOP_SIGNALS[mode])) except psutil.NoSuchProcess: return True except psutil.AccessDenied as e: logger.warning("Could not send stop signal to PostgreSQL (error: {0})".format(e)) return False return None def pg_ctl_kill(self, mode: str, pg_ctl: str) -> Optional[bool]: try: status = subprocess.call([pg_ctl, "kill", STOP_SIGNALS[mode], str(self.pid)]) except OSError: return False if status == 0: return None else: return not self.is_running() def wait_for_user_backends_to_close(self, stop_timeout: Optional[float]) -> None: # These regexps are cross checked against versions PostgreSQL 9.1 .. 15 aux_proc_re = re.compile("(?:postgres:)( .*:)? 
    def wait_for_user_backends_to_close(self, stop_timeout: Optional[float]) -> None:
        """Wait until all user (client) backends of this postmaster exit.

        Auxiliary processes (checkpointer, walwriter, etc.) are filtered out by name.

        :param stop_timeout: maximum seconds to wait; ``None`` waits indefinitely.
        """
        # These regexps are cross checked against versions PostgreSQL 9.1 .. 15
        aux_proc_re = re.compile("(?:postgres:)( .*:)? (?:(?:archiver|startup|autovacuum launcher|autovacuum worker|"
                                 "checkpointer|logger|stats collector|wal receiver|wal writer|writer)(?: process )?|"
                                 "walreceiver|wal sender process|walsender|walwriter|background writer|"
                                 "logical replication launcher|logical replication worker for|bgworker:) ")

        try:
            children = self.children()
        except psutil.Error:
            return logger.debug('Failed to get list of postmaster children')

        user_backends: List[psutil.Process] = []
        user_backends_cmdlines: Dict[int, str] = {}
        for child in children:
            try:
                cmdline = child.cmdline()
                if cmdline and not aux_proc_re.match(cmdline[0]):
                    user_backends.append(child)
                    user_backends_cmdlines[child.pid] = cmdline[0]
            except psutil.NoSuchProcess:
                pass
        if user_backends:
            logger.debug('Waiting for user backends %s to close', ', '.join(user_backends_cmdlines.values()))
            _, live = psutil.wait_procs(user_backends, stop_timeout)
            if stop_timeout and live:
                live = [user_backends_cmdlines[b.pid] for b in live]
                logger.warning('Backends still alive after %s: %s', stop_timeout, ', '.join(live))
            else:
                logger.debug("Backends closed")

    @staticmethod
    def start(pgcommand: str, data_dir: str, conf: str, options: List[str]) -> Optional['PostmasterProcess']:
        # Unfortunately `pg_ctl start` does not return postmaster pid to us. Without this information
        # it is hard to know the current state of postgres startup, so we had to reimplement pg_ctl start
        # in python. It will start postgres, wait for port to be open and wait until postgres will start
        # accepting connections.
        # Important!!! We can't just start postgres using subprocess.Popen, because in this case it
        # will be our child for the rest of our live and we will have to take care of it (`waitpid`).
        # So we will use the same approach as pg_ctl uses: start a new process, which will start postgres.
        # This process will write postmaster pid to stdout and exit immediately. Now it's responsibility
        # of init process to take care about postmaster.
        # In order to make everything portable we can't use fork&exec approach here, so we will call
        # ourselves and pass list of arguments which must be used to start postgres.
        # On Windows, in order to run a side-by-side assembly the specified env must include a valid SYSTEMROOT.
        env = {p: os.environ[p] for p in os.environ if not p.startswith(
            PATRONI_ENV_PREFIX) and not p.startswith(KUBERNETES_ENV_PREFIX)}
        try:
            proc = PostmasterProcess._from_pidfile(data_dir)
            if proc and not proc._is_postmaster_process():
                # Upon start postmaster process performs various safety checks if there is a postmaster.pid
                # file in the data directory. Although Patroni already detected that the running process
                # corresponding to the postmaster.pid is not a postmaster, the new postmaster might fail
                # to start, because it thinks that postmaster.pid is already locked.
                # Important!!! Unlink of postmaster.pid isn't an option, because it has a lot of nasty race conditions.
                # Luckily there is a workaround to this problem, we can pass the pid from postmaster.pid
                # in the `PG_GRANDPARENT_PID` environment variable and postmaster will ignore it.
                logger.info("Telling pg_ctl that it is safe to ignore postmaster.pid for process %s", proc.pid)
                env['PG_GRANDPARENT_PID'] = str(proc.pid)
        except psutil.NoSuchProcess:
            pass
        cmdline = [pgcommand, '-D', data_dir, '--config-file={}'.format(conf)] + options
        logger.debug("Starting postgres: %s", " ".join(cmdline))
        # 'spawn' context: the child must not inherit our state; it only execs postgres and
        # reports the pid back over the pipe.
        ctx = multiprocessing.get_context('spawn')
        parent_conn, child_conn = ctx.Pipe(False)
        proc = ctx.Process(target=pg_ctl_start, args=(child_conn, cmdline, env))
        proc.start()
        pid = parent_conn.recv()
        proc.join()
        if pid is None:
            return
        logger.info('postmaster pid=%s', pid)

        # TODO: In an extremely unlikely case, the process could have exited and the pid reassigned. The start
        # initiation time is not accurate enough to compare to create time as start time would also likely
        # be relatively close. We need the subprocess extract pid+start_time in a race free manner.
        return PostmasterProcess.from_pid(pid)
        return PostmasterProcess.from_pid(pid)


# ---- tar member boundary: patroni/postgresql/rewind.py ----
import logging
import os
import re
import shlex
import shutil
import subprocess

from enum import IntEnum
from threading import Lock, Thread
from typing import Any, Callable, Dict, List, Optional, Union, Tuple

from . import Postgresql
from .connection import get_connection_cursor
from .misc import format_lsn, fsync_dir, parse_history, parse_lsn
from ..async_executor import CriticalTask
from ..dcs import Leader, RemoteMember

logger = logging.getLogger(__name__)


class REWIND_STATUS(IntEnum):
    # State machine of the rewind procedure: INITIAL -> CHECKPOINT -> CHECK -> NEED/NOT_NEED -> SUCCESS/FAILED
    INITIAL = 0
    CHECKPOINT = 1
    CHECK = 2
    NEED = 3
    NOT_NEED = 4
    SUCCESS = 5
    FAILED = 6


class Rewind(object):

    def __init__(self, postgresql: Postgresql) -> None:
        self._postgresql = postgresql
        self._checkpoint_task_lock = Lock()
        self.reset_state()

    @staticmethod
    def configuration_allows_rewind(data: Dict[str, str]) -> bool:
        """``True`` if pg_controldata output shows wal_log_hints=on or data checksums enabled."""
        return data.get('wal_log_hints setting', 'off') == 'on' or data.get('Data page checksum version', '0') != '0'

    @property
    def enabled(self) -> bool:
        return bool(self._postgresql.config.get('use_pg_rewind'))

    @property
    def can_rewind(self) -> bool:
        """ check if pg_rewind executable is there and that pg_controldata
            indicates we have either wal_log_hints or checksums turned on
        """
        # low-hanging fruit: check if pg_rewind configuration is there
        if not self.enabled:
            return False

        cmd = [self._postgresql.pgcommand('pg_rewind'), '--help']
        try:
            ret = subprocess.call(cmd, stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT)
            if ret != 0:  # pg_rewind is not there, close up the shop and go home
                return False
        except OSError:
            return False
        return self.configuration_allows_rewind(self._postgresql.controldata())

    @property
    def should_remove_data_directory_on_diverged_timelines(self) -> bool:
        return bool(self._postgresql.config.get('remove_data_directory_on_diverged_timelines'))
-> bool: return self.should_remove_data_directory_on_diverged_timelines or self.can_rewind def trigger_check_diverged_lsn(self) -> None: if self.can_rewind_or_reinitialize_allowed and self._state != REWIND_STATUS.NEED: self._state = REWIND_STATUS.CHECK @staticmethod def check_leader_is_not_in_recovery(conn_kwargs: Dict[str, Any]) -> Optional[bool]: try: with get_connection_cursor(connect_timeout=3, options='-c statement_timeout=2000', **conn_kwargs) as cur: cur.execute('SELECT pg_catalog.pg_is_in_recovery()') row = cur.fetchone() if not row or not row[0]: return True logger.info('Leader is still in_recovery and therefore can\'t be used for rewind') except Exception: return logger.exception('Exception when working with leader') @staticmethod def check_leader_has_run_checkpoint(conn_kwargs: Dict[str, Any]) -> Optional[str]: try: with get_connection_cursor(connect_timeout=3, options='-c statement_timeout=2000', **conn_kwargs) as cur: cur.execute("SELECT NOT pg_catalog.pg_is_in_recovery()" " AND ('x' || pg_catalog.substr(pg_catalog.pg_walfile_name(" " pg_catalog.pg_current_wal_lsn()), 1, 8))::bit(32)::int = timeline_id" " FROM pg_catalog.pg_control_checkpoint()") row = cur.fetchone() if not row or not row[0]: return 'leader has not run a checkpoint yet' except Exception: logger.exception('Exception when working with leader') return 'not accessible or not healty' def _get_checkpoint_end(self, timeline: int, lsn: int) -> int: """Get the end of checkpoint record from WAL. .. note:: The checkpoint record size in WAL depends on postgres major version and platform (memory alignment). Hence, the only reliable way to figure out where it ends, is to read the record from file with the help of ``pg_waldump`` and parse the output. 
    def _get_checkpoint_end(self, timeline: int, lsn: int) -> int:
        """Get the end of checkpoint record from WAL.

        .. note::
            The checkpoint record size in WAL depends on postgres major version and platform (memory alignment).
            Hence, the only reliable way to figure out where it ends, is to read the record from file with the help
            of ``pg_waldump`` and parse the output.

            We are trying to read two records, and expect that it will fail to read the second record with message:
                fatal: error in WAL record at 0/182E220: invalid record length at 0/182E298: wanted 24, got 0
            or
                fatal: error in WAL record at 0/182E220: invalid record length at 0/182E298: expected at least 24, got 0

            The error message contains information about LSN of the next record,
            which is exactly where checkpoint ends.

        :param timeline: the checkpoint *timeline* from ``pg_controldata``.
        :param lsn: the checkpoint *location* as :class:`int` from ``pg_controldata``.

        :returns: the end of checkpoint record as :class:`int` or ``0`` if failed to parse ``pg_waldump`` output.
        """
        lsn8 = format_lsn(lsn, True)
        lsn_str = format_lsn(lsn)
        out, err = self._postgresql.waldump(timeline, lsn_str, 2)
        if out is not None and err is not None:
            out = out.decode('utf-8').rstrip().split('\n')
            err = err.decode('utf-8').rstrip().split('\n')
            pattern = 'error in WAL record at {0}: invalid record length at '.format(lsn_str)

            if len(out) == 1 and len(err) == 1 and ', lsn: {0}, prev '.format(lsn8) in out[0] and pattern in err[0]:
                i = err[0].find(pattern) + len(pattern)

                # Message format depends on the major version:
                # * expected at least -- starting from v16
                # * wanted -- before v16
                # We will simply check all possible combinations.
                for pattern in (': expected at least ', ': wanted '):
                    j = err[0].find(pattern, i)
                    if j > -1:
                        try:
                            return parse_lsn(err[0][i:j])
                        except Exception as e:
                            logger.error('Failed to parse lsn %s: %r', err[0][i:j], e)
            logger.error('Failed to parse pg_%sdump output', self._postgresql.wal_name)
            logger.error(' stdout=%s', '\n'.join(out))
            logger.error(' stderr=%s', '\n'.join(err))
        return 0

    def _get_local_timeline_lsn_from_controldata(self) -> Tuple[Optional[bool], Optional[int], Optional[int]]:
        """Derive (in_recovery, timeline, lsn) of the local cluster from pg_controldata output."""
        in_recovery = timeline = lsn = None
        data = self._postgresql.controldata()
        try:
            if data.get('Database cluster state') in ('shut down in recovery', 'in archive recovery'):
                in_recovery = True
                lsn = data.get('Minimum recovery ending location')
                timeline = int(data.get("Min recovery ending loc's timeline", ""))
                if lsn == '0/0' or timeline == 0:  # it was a primary when it crashed
                    data['Database cluster state'] = 'shut down'
            if data.get('Database cluster state') == 'shut down':
                in_recovery = False
                lsn = data.get('Latest checkpoint location')
                timeline = int(data.get("Latest checkpoint's TimeLineID", ""))
        except (TypeError, ValueError):
            logger.exception('Failed to get local timeline and lsn from pg_controldata output')

        if lsn is not None:
            try:
                lsn = parse_lsn(lsn)
            except (IndexError, ValueError) as e:
                logger.error('Exception when parsing lsn %s: %r', lsn, e)
                lsn = None

        return in_recovery, timeline, lsn

    def _get_local_timeline_lsn(self) -> Tuple[Optional[bool], Optional[int], Optional[int]]:
        """Return (in_recovery, timeline, lsn) using a live connection if postgres runs, else pg_controldata."""
        if self._postgresql.is_running():  # if postgres is running - get timeline from replication connection
            in_recovery = True
            timeline = self._postgresql.get_replica_timeline()
            lsn = self._postgresql.replayed_location()
        else:  # otherwise analyze pg_controldata output
            in_recovery, timeline, lsn = self._get_local_timeline_lsn_from_controldata()

        log_lsn = format_lsn(lsn) if isinstance(lsn, int) else lsn
        logger.info('Local timeline=%s lsn=%s', timeline, log_lsn)
        return in_recovery, timeline, lsn
    @staticmethod
    def _log_primary_history(history: List[Tuple[int, int, str]], i: int) -> None:
        """Log a window of the primary's timeline history around index *i* (elided with '...')."""
        start = max(0, i - 3)
        end = None if i + 4 >= len(history) else i + 2
        history_show: List[str] = []

        def format_history_line(line: Tuple[int, int, str]) -> str:
            return '{0}\t{1}\t{2}'.format(line[0], format_lsn(line[1]), line[2])

        line = None
        for line in history[start:end]:
            history_show.append(format_history_line(line))

        if line != history[-1]:
            history_show.append('...')
            history_show.append(format_history_line(history[-1]))

        logger.info('primary: history=%s', '\n'.join(history_show))

    def _conn_kwargs(self, member: Union[Leader, RemoteMember], auth: Dict[str, Any]) -> Dict[str, Any]:
        """Build connection kwargs for *member* with *auth* credentials and a sane dbname."""
        ret = member.conn_kwargs(auth)
        if not ret.get('dbname'):
            ret['dbname'] = self._postgresql.database
        # Add target_session_attrs in case more than one hostname is specified
        # (libpq client-side failover) making sure we hit the primary
        if 'target_session_attrs' not in ret and self._postgresql.major_version >= 100000:
            ret['target_session_attrs'] = 'read-write'
        return ret

    def _check_timeline_and_lsn(self, leader: Union[Leader, RemoteMember]) -> None:
        """Compare local timeline/LSN with the leader's history and set state to NEED or NOT_NEED."""
        in_recovery, local_timeline, local_lsn = self._get_local_timeline_lsn()
        if local_timeline is None or local_lsn is None:
            return

        if isinstance(leader, Leader) and leader.member.data.get('role') not in ('master', 'primary'):
            return

        # We want to use replication credentials when connecting to the "postgres" database in case if
        # `use_pg_rewind` isn't enabled and only `remove_data_directory_on_diverged_timelines` is set
        # for Postgresql older than v11 (where Patroni can't use a dedicated user for rewind).
        # In all other cases we will use rewind or superuser credentials.
        check_credentials = self._postgresql.config.replication if not self.enabled and\
            self.should_remove_data_directory_on_diverged_timelines and\
            self._postgresql.major_version < 110000 else self._postgresql.config.rewind_credentials
        if not self.check_leader_is_not_in_recovery(self._conn_kwargs(leader, check_credentials)):
            return

        history = need_rewind = None
        try:
            with self._postgresql.get_replication_connection_cursor(**leader.conn_kwargs()) as cur:
                cur.execute('IDENTIFY_SYSTEM')
                row = cur.fetchone()
                if row:
                    primary_timeline = row[1]
                    logger.info('primary_timeline=%s', primary_timeline)
                    if local_timeline > primary_timeline:  # Not always supported by pg_rewind
                        need_rewind = True
                    elif local_timeline == primary_timeline:
                        need_rewind = False
                    elif primary_timeline > 1:
                        cur.execute('TIMELINE_HISTORY {0}'.format(primary_timeline).encode('utf-8'))
                        row = cur.fetchone()
                        if row:
                            history = row[1]
                            if not isinstance(history, str):
                                history = bytes(history).decode('utf-8')
                            logger.debug('primary: history=%s', history)
        except Exception:
            return logger.exception('Exception when working with primary via replication connection')

        if history is not None:
            history = list(parse_history(history))
            i = len(history)
            for i, (parent_timeline, switchpoint, _) in enumerate(history):
                if parent_timeline == local_timeline:
                    # We don't need to rewind when:
                    # 1. for replica: replayed location is not ahead of switchpoint
                    # 2. for the former primary: end of checkpoint record is the same as switchpoint
                    if in_recovery:
                        need_rewind = local_lsn > switchpoint
                    elif local_lsn >= switchpoint:
                        need_rewind = True
                    else:
                        need_rewind = switchpoint != self._get_checkpoint_end(local_timeline, local_lsn)
                    break
                elif parent_timeline > local_timeline:
                    need_rewind = True
                    break
            else:
                need_rewind = True
            self._log_primary_history(history, i)

        self._state = need_rewind and REWIND_STATUS.NEED or REWIND_STATUS.NOT_NEED

    def rewind_or_reinitialize_needed_and_possible(self, leader: Union[Leader, RemoteMember, None]) -> bool:
        """Run the (possibly pending) divergence check and report whether a rewind/reinit is due."""
        if leader and leader.name != self._postgresql.name and leader.conn_url and self._state == REWIND_STATUS.CHECK:
            self._check_timeline_and_lsn(leader)
        return bool(leader and leader.conn_url) and self._state == REWIND_STATUS.NEED

    def __checkpoint(self, task: CriticalTask, wakeup: Callable[..., Any]) -> None:
        """Run CHECKPOINT (from a background thread), record result in *task* and wake the main loop."""
        try:
            result = self._postgresql.checkpoint()
        except Exception as e:
            result = 'Exception: ' + str(e)
        with task:
            task.complete(not bool(result))
        if task.result:
            wakeup()
    def ensure_checkpoint_after_promote(self, wakeup: Callable[..., Any]) -> None:
        """After promote issue a CHECKPOINT from a new thread and asynchronously check the result.
        In case if CHECKPOINT failed, just check that timeline in pg_control was updated."""

        if self._state != REWIND_STATUS.CHECKPOINT and self._postgresql.is_primary():
            with self._checkpoint_task_lock:
                if self._checkpoint_task:
                    with self._checkpoint_task:
                        if self._checkpoint_task.result is not None:
                            self._state = REWIND_STATUS.CHECKPOINT
                            self._checkpoint_task = None
                elif self._postgresql.get_primary_timeline() == self._postgresql.pg_control_timeline():
                    self._state = REWIND_STATUS.CHECKPOINT
                else:
                    self._checkpoint_task = CriticalTask()
                    Thread(target=self.__checkpoint, args=(self._checkpoint_task, wakeup)).start()

    def checkpoint_after_promote(self) -> bool:
        return self._state == REWIND_STATUS.CHECKPOINT

    def _buid_archiver_command(self, command: str, wal_filename: str) -> str:
        """Replace placeholders in the given archiver command's template.
        Applicable for archive_command and restore_command.
        Can also be used for archive_cleanup_command and recovery_end_command,
        however %r value is always set to 000000010000000000000001."""
        cmd = ''
        length = len(command)
        i = 0
        while i < length:
            if command[i] == '%' and i + 1 < length:
                i += 1
                if command[i] == 'p':
                    cmd += os.path.join(self._postgresql.wal_dir, wal_filename)
                elif command[i] == 'f':
                    cmd += wal_filename
                elif command[i] == 'r':
                    cmd += '000000010000000000000001'
                elif command[i] == '%':
                    cmd += '%'
                else:
                    # unknown escape: emit the '%' literally and re-process the next char
                    cmd += '%'
                    i -= 1
            else:
                cmd += command[i]
            i += 1
        return cmd

    def _fetch_missing_wal(self, restore_command: str, wal_filename: str) -> bool:
        """Fetch *wal_filename* from the archive via *restore_command*; ``True`` on success."""
        cmd = self._buid_archiver_command(restore_command, wal_filename)

        logger.info('Trying to fetch the missing wal: %s', cmd)
        return self._postgresql.cancellable.call(shlex.split(cmd)) == 0

    def _find_missing_wal(self, data: bytes) -> Optional[str]:
        """Extract the name of a missing WAL segment from pg_rewind output, if present."""
        # could not open file "$PGDATA/pg_wal/0000000A00006AA100000068": No such file or directory
        pattern = 'could not open file "'
        for line in data.decode('utf-8').split('\n'):
            b = line.find(pattern)
            if b > -1:
                b += len(pattern)
                e = line.find('": ', b)
                if e > -1 and '/' in line[b:e]:
                    waldir, wal_filename = line[b:e].rsplit('/', 1)
                    if waldir.endswith('/pg_' + self._postgresql.wal_name) and len(wal_filename) == 24:
                        return wal_filename

    def _archive_ready_wals(self) -> None:
        """Try to archive WALs that have .ready files just in case
        archive_mode was not set to 'always' before promote,
        while after it the WALs were recycled on the promoted replica.
        With this we prevent the entire loss of such WALs and the
        consequent old leader's start failure."""
        archive_mode = self._postgresql.get_guc_value('archive_mode')
        archive_cmd = self._postgresql.get_guc_value('archive_command')
        if archive_mode not in ('on', 'always') or not archive_cmd:
            return

        walseg_regex = re.compile(r'^[0-9A-F]{24}(\.partial){0,1}\.ready$')
        status_dir = os.path.join(self._postgresql.wal_dir, 'archive_status')
        try:
            wals_to_archive = [f[:-6] for f in os.listdir(status_dir) if walseg_regex.match(f)]
        except OSError as e:
            return logger.error('Unable to list %s: %r', status_dir, e)

        # skip fsync, as postgres --single or pg_rewind will anyway run it
        for wal in sorted(wals_to_archive):
            old_name = os.path.join(status_dir, wal + '.ready')
            # wal file might have already been archived
            if os.path.isfile(old_name) and os.path.isfile(os.path.join(self._postgresql.wal_dir, wal)):
                cmd = self._buid_archiver_command(archive_cmd, wal)
                # it is the author of archive_command, who is responsible
                # for not overriding the WALs already present in archive
                logger.info('Trying to archive %s: %s', wal, cmd)
                # NOTE(review): shell=True runs the user-configured archive_command through the shell,
                # matching how postgres itself executes it; the command comes from trusted configuration.
                if self._postgresql.cancellable.call([cmd], shell=True) == 0:
                    new_name = os.path.join(status_dir, wal + '.done')
                    try:
                        shutil.move(old_name, new_name)
                    except Exception as e:
                        logger.error('Unable to rename %s to %s: %r', old_name, new_name, e)
                else:
                    logger.info('Failed to archive WAL segment %s', wal)
self._postgresql.major_version < 110000: replslot_dir = self._postgresql.slots_handler.pg_replslot_dir try: for f in os.listdir(replslot_dir): shutil.rmtree(os.path.join(replslot_dir, f)) fsync_dir(replslot_dir) except Exception as e: logger.warning('Unable to clean %s: %r', replslot_dir, e) def pg_rewind(self, r: Dict[str, Any]) -> bool: # prepare pg_rewind connection env = self._postgresql.config.write_pgpass(r) env.update(LANG='C', LC_ALL='C', PGOPTIONS='-c statement_timeout=0') dsn = self._postgresql.config.format_dsn(r, True) logger.info('running pg_rewind from %s', dsn) restore_command = (self._postgresql.config.get('recovery_conf') or {}).get('restore_command') \ if self._postgresql.major_version < 120000 else self._postgresql.get_guc_value('restore_command') # Until v15 pg_rewind expected postgresql.conf to be inside $PGDATA, which is not the case on e.g. Debian pg_rewind_can_restore = restore_command and (self._postgresql.major_version >= 150000 or (self._postgresql.major_version >= 130000 and self._postgresql.config.config_dir == self._postgresql.data_dir)) cmd = [self._postgresql.pgcommand('pg_rewind')] if pg_rewind_can_restore: cmd.append('--restore-target-wal') if self._postgresql.major_version >= 150000 and\ self._postgresql.config.config_dir != self._postgresql.data_dir: cmd.append('--config-file={0}'.format(self._postgresql.config.postgresql_conf)) cmd.extend(['-D', self._postgresql.data_dir, '--source-server', dsn]) while True: results: Dict[str, bytes] = {} ret = self._postgresql.cancellable.call(cmd, env=env, communicate=results) logger.info('pg_rewind exit code=%s', ret) if ret is None: return False logger.info(' stdout=%s', results['stdout'].decode('utf-8')) logger.info(' stderr=%s', results['stderr'].decode('utf-8')) if ret == 0: return True if not restore_command or pg_rewind_can_restore: return False missing_wal = self._find_missing_wal(results['stderr']) or self._find_missing_wal(results['stdout']) if not missing_wal: return False if not 
self._fetch_missing_wal(restore_command, missing_wal): logger.info('Failed to fetch WAL segment %s required for pg_rewind', missing_wal) return False def execute(self, leader: Union[Leader, RemoteMember]) -> Optional[bool]: if self._postgresql.is_running() and not self._postgresql.stop(checkpoint=False): return logger.warning('Can not run pg_rewind because postgres is still running') self._archive_ready_wals() # prepare pg_rewind connection r = self._conn_kwargs(leader, self._postgresql.config.rewind_credentials) # 1. make sure that we are really trying to rewind from the primary # 2. make sure that pg_control contains the new timeline by: # running a checkpoint or # waiting until Patroni on the primary will expose checkpoint_after_promote=True checkpoint_status = leader.checkpoint_after_promote if isinstance(leader, Leader) else None if checkpoint_status is None: # we are the standby-cluster leader or primary still runs the old Patroni # superuser credentials match rewind_credentials if the latter are not provided or we run 10 or older if self._postgresql.config.superuser == self._postgresql.config.rewind_credentials: leader_status = self._postgresql.checkpoint( self._conn_kwargs(leader, self._postgresql.config.superuser)) else: # we run 11+ and have a dedicated pg_rewind user leader_status = self.check_leader_has_run_checkpoint(r) if leader_status: # we tried to run/check for a checkpoint on the remote leader, but it failed return logger.warning('Can not use %s for rewind: %s', leader.name, leader_status) elif not checkpoint_status: return logger.info('Waiting for checkpoint on %s before rewind', leader.name) elif not self.check_leader_is_not_in_recovery(r): return if self.pg_rewind(r): self._maybe_clean_pg_replslot() self._state = REWIND_STATUS.SUCCESS else: if not self.check_leader_is_not_in_recovery(r): logger.warning('Failed to rewind because primary %s become unreachable', leader.name) if not self.can_rewind: # It is possible that the previous attempt 
damaged pg_control file! self._state = REWIND_STATUS.FAILED else: logger.error('Failed to rewind from healty primary: %s', leader.name) self._state = REWIND_STATUS.FAILED if self.failed: for name in ('remove_data_directory_on_rewind_failure', 'remove_data_directory_on_diverged_timelines'): if self._postgresql.config.get(name): logger.warning('%s is set. removing...', name) self._postgresql.remove_data_directory() self._state = REWIND_STATUS.INITIAL break return False def reset_state(self) -> None: self._state = REWIND_STATUS.INITIAL with self._checkpoint_task_lock: self._checkpoint_task = None @property def is_needed(self) -> bool: return self._state in (REWIND_STATUS.CHECK, REWIND_STATUS.NEED) @property def executed(self) -> bool: return self._state > REWIND_STATUS.NOT_NEED @property def failed(self) -> bool: return self._state == REWIND_STATUS.FAILED def read_postmaster_opts(self) -> Dict[str, str]: """returns the list of option names/values from postgres.opts, Empty dict if read failed or no file""" result: Dict[str, str] = {} try: with open(os.path.join(self._postgresql.data_dir, 'postmaster.opts')) as f: data = f.read() for opt in data.split('" "'): if '=' in opt and opt.startswith('--'): name, val = opt.split('=', 1) result[name.strip('-')] = val.rstrip('"\n') except IOError: logger.exception('Error when reading postmaster.opts') return result def single_user_mode(self, communicate: Optional[Dict[str, Any]] = None, options: Optional[Dict[str, str]] = None) -> Optional[int]: """run a given command in a single-user mode. 
    def single_user_mode(self, communicate: Optional[Dict[str, Any]] = None,
                         options: Optional[Dict[str, str]] = None) -> Optional[int]:
        """run a given command in a single-user mode. If the command is empty - then just start and stop"""
        cmd = [self._postgresql.pgcommand('postgres'), '--single', '-D', self._postgresql.data_dir]
        for opt, val in sorted((options or {}).items()):
            cmd.extend(['-c', '{0}={1}'.format(opt, val)])
        # need a database name to connect
        cmd.append('template1')
        return self._postgresql.cancellable.call(cmd, communicate=communicate)

    def cleanup_archive_status(self) -> None:
        """Remove all files/symlinks from $PGDATA's archive_status directory (best effort)."""
        status_dir = os.path.join(self._postgresql.wal_dir, 'archive_status')
        try:
            for f in os.listdir(status_dir):
                path = os.path.join(status_dir, f)
                try:
                    if os.path.islink(path):
                        os.unlink(path)
                    elif os.path.isfile(path):
                        os.remove(path)
                except OSError:
                    logger.exception('Unable to remove %s', path)
        except OSError:
            logger.exception('Unable to list %s', status_dir)

    def ensure_clean_shutdown(self) -> Optional[bool]:
        """Perform crash recovery in single-user mode to leave the cluster cleanly shut down.

        :returns: ``True`` on success, ``None`` on failure.
        """
        self._archive_ready_wals()
        self.cleanup_archive_status()

        # Start in a single user mode and stop to produce a clean shutdown
        opts = self.read_postmaster_opts()
        opts.update({'archive_mode': 'on', 'archive_command': 'false'})
        self._postgresql.config.remove_recovery_conf()
        output: Dict[str, bytes] = {}
        ret = self.single_user_mode(communicate=output, options=opts)
        if ret != 0:
            logger.error('Crash recovery finished with code=%s', ret)
            logger.info(' stdout=%s', output['stdout'].decode('utf-8'))
            logger.info(' stderr=%s', output['stderr'].decode('utf-8'))
        return ret == 0 or None


# ---- tar member boundary: patroni/postgresql/slots.py ----
""" import logging import os import shutil from collections import defaultdict from contextlib import contextmanager from threading import Condition, Thread from typing import Any, Dict, Iterator, List, Optional, Union, Tuple, TYPE_CHECKING, Collection from .connection import get_connection_cursor from .misc import format_lsn, fsync_dir from ..dcs import Cluster, Leader from ..file_perm import pg_perm from ..psycopg import OperationalError if TYPE_CHECKING: # pragma: no cover from psycopg import Cursor from psycopg2 import cursor from . import Postgresql logger = logging.getLogger(__name__) def compare_slots(s1: Dict[str, Any], s2: Dict[str, Any], dbid: str = 'database') -> bool: """Compare 2 replication slot objects for equality. ..note :: If the first argument is a ``physical`` replication slot then only the `type` of the second slot is compared. If the first argument is another ``type`` (e.g. ``logical``) then *dbid* and ``plugin`` are compared. :param s1: First slot dictionary to be compared. :param s2: Second slot dictionary to be compared. :param dbid: Optional attribute to be compared when comparing ``logical`` replication slots. :return: ``True`` if the slot ``type`` of *s1* and *s2* is matches, and the ``type`` of *s1* is ``physical``, OR the ``types`` match AND the *dbid* and ``plugin`` attributes are equal. """ return (s1['type'] == s2['type'] and (s1['type'] == 'physical' or s1.get(dbid) == s2.get(dbid) and s1['plugin'] == s2['plugin'])) class SlotsAdvanceThread(Thread): """Daemon process :class:``Thread`` object for advancing logical replication slots on replicas. This ensures that slot advancing queries sent to postgres do not block the main loop. """ def __init__(self, slots_handler: 'SlotsHandler') -> None: """Create and start a new thread for handling slot advance queries. :param slots_handler: The calling class instance for reference to slot information attributes. 
""" super().__init__() self.daemon = True self._slots_handler = slots_handler # _copy_slots and _failed are used to asynchronously give some feedback to the main thread self._copy_slots: List[str] = [] self._failed = False # {'dbname1': {'slot1': 100, 'slot2': 100}, 'dbname2': {'slot3': 100}} self._scheduled: Dict[str, Dict[str, int]] = defaultdict(dict) self._condition = Condition() # protect self._scheduled from concurrent access and to wakeup the run() method self.start() def sync_slot(self, cur: Union['cursor', 'Cursor[Any]'], database: str, slot: str, lsn: int) -> None: """Execute a ``pg_replication_slot_advance`` query and store success for scheduled synchronisation task. :param cur: database connection cursor. :param database: name of the database associated with the slot. :param slot: name of the slot to be synchronised. :param lsn: last known LSN position """ failed = copy = False try: cur.execute("SELECT pg_catalog.pg_replication_slot_advance(%s, %s)", (slot, format_lsn(lsn))) except Exception as e: logger.error("Failed to advance logical replication slot '%s': %r", slot, e) failed = True copy = isinstance(e, OperationalError) and e.diag.sqlstate == '58P01' # WAL file is gone with self._condition: if self._scheduled and failed: if copy and slot not in self._copy_slots: self._copy_slots.append(slot) self._failed = True new_lsn = self._scheduled.get(database, {}).get(slot, 0) # remove slot from the self._scheduled structure only if it wasn't changed if new_lsn == lsn and database in self._scheduled: self._scheduled[database].pop(slot) if not self._scheduled[database]: self._scheduled.pop(database) def sync_slots_in_database(self, database: str, slots: List[str]) -> None: """Synchronise slots for a single database. :param database: name of the database. :param slots: list of slot names to synchronise. 
""" with self._slots_handler.get_local_connection_cursor(dbname=database, options='-c statement_timeout=0') as cur: for slot in slots: with self._condition: lsn = self._scheduled.get(database, {}).get(slot, 0) if lsn: self.sync_slot(cur, database, slot, lsn) def sync_slots(self) -> None: """Synchronise slots for all scheduled databases.""" with self._condition: databases = list(self._scheduled.keys()) for database in databases: with self._condition: slots = list(self._scheduled.get(database, {}).keys()) if slots: try: self.sync_slots_in_database(database, slots) except Exception as e: logger.error('Failed to advance replication slots in database %s: %r', database, e) def run(self) -> None: """Thread main loop entrypoint. .. note:: Thread will wait until a sync is scheduled from outside, normally triggered during the HA loop or a wakeup call. """ while True: with self._condition: if not self._scheduled: self._condition.wait() self.sync_slots() def schedule(self, advance_slots: Dict[str, Dict[str, int]]) -> Tuple[bool, List[str]]: """Trigger a synchronisation of slots. This is the main entrypoint for Patroni HA loop wakeup call. :param advance_slots: dictionary containing slots that need to be advanced :return: tuple of failure status and a list of slots to be copied """ with self._condition: for database, values in advance_slots.items(): self._scheduled[database].update(values) ret = (self._failed, self._copy_slots) self._copy_slots = [] self._failed = False self._condition.notify() return ret def on_promote(self) -> None: """Reset state of the daemon.""" with self._condition: self._scheduled.clear() self._failed = False self._copy_slots = [] class SlotsHandler: """Handler for managing and storing information on replication slots in PostgreSQL. :ivar pg_replslot_dir: system location path of the PostgreSQL replication slots. 
class SlotsHandler:
    """Handler for managing and storing information on replication slots in PostgreSQL.

    :ivar pg_replslot_dir: system location path of the PostgreSQL replication slots.
    :ivar _logical_slots_processing_queue: yet to be processed logical replication slots on the primary
    """

    def __init__(self, postgresql: 'Postgresql') -> None:
        """Create an instance with storage attributes for replication slots and schedule the first synchronisation.

        :param postgresql: Calling class instance providing interface to PostgreSQL.
        """
        self._force_readiness_check = False
        self._schedule_load_slots = False
        self._postgresql = postgresql
        self._advance = None
        self._replication_slots: Dict[str, Dict[str, Any]] = {}  # already existing replication slots
        self._logical_slots_processing_queue: Dict[str, Optional[int]] = {}
        self.pg_replslot_dir = os.path.join(self._postgresql.data_dir, 'pg_replslot')
        self.schedule()

    def _query(self, sql: str, *params: Any) -> List[Tuple[Any, ...]]:
        """Helper method for :meth:`Postgresql.query`.

        :param sql: SQL statement to execute.
        :param params: parameters to pass through to :meth:`Postgresql.query`.

        :returns: query response.
        """
        return self._postgresql.query(sql, *params, retry=False)

    @staticmethod
    def _copy_items(src: Dict[str, Any], dst: Dict[str, Any], keys: Optional[Collection[str]] = None) -> None:
        """Select values from *src* dictionary to update in *dst* dictionary for optional supplied *keys*.

        :param src: source dictionary that *keys* will be looked up from.
        :param dst: destination dictionary to be updated.
        :param keys: optional list of keys to be looked up in the source dictionary.
        """
        dst.update({key: src[key] for key in keys or ('datoid', 'catalog_xmin', 'confirmed_flush_lsn')})

    def process_permanent_slots(self, slots: List[Dict[str, Any]]) -> Dict[str, int]:
        """Process replication slot information from the host and prepare information used in subsequent cluster tasks.

        .. note::
            This methods solves three problems.

            The ``cluster_info_query`` from :class:``Postgresql`` is executed every HA loop and returns
            information about all replication slots that exists on the current host.
            Based on this information perform the following actions:

            1. For the primary we want to expose to DCS permanent logical slots, therefore build (and return)
               a dict that maps permanent logical slot names to ``confirmed_flush_lsn``.
            2. detect if one of the previously known permanent slots is missing and schedule resync.
            3. Update the local cache with the fresh ``catalog_xmin`` and ``confirmed_flush_lsn`` for every known slot.

            This info is used when performing the check of logical slot readiness on standbys.

        :param slots: replication slot information that exists on the current host.

        :return: dictionary of logical slot names to ``confirmed_flush_lsn``.
        """
        ret: Dict[str, int] = {}

        slots_dict: Dict[str, Dict[str, Any]] = {slot['slot_name']: slot for slot in slots or []}
        for name, value in slots_dict.items():
            if name in self._replication_slots:
                if compare_slots(value, self._replication_slots[name], 'datoid'):
                    if value['type'] == 'logical':
                        ret[name] = value['confirmed_flush_lsn']
                        self._copy_items(value, self._replication_slots[name])
                    else:
                        self._replication_slots[name]['restart_lsn'] = ret[name] = value['restart_lsn']
                else:
                    self._schedule_load_slots = True

        # It could happen that the slot was deleted in the background, we want to detect this case
        if any(name not in slots_dict for name in self._replication_slots.keys()):
            self._schedule_load_slots = True

        return ret

    def load_replication_slots(self) -> None:
        """Query replication slot information from the database and store it for processing by other tasks.

        .. note::
            Only supported from PostgreSQL version 9.4 onwards.

            Store replication slot ``name``, ``type``, ``plugin``, ``database`` and ``datoid``.
            If PostgreSQL version is 10 or newer also store ``catalog_xmin`` and ``confirmed_flush_lsn``.

            When using logical slots, store information separately for slot synchronisation on replica nodes.
        """
        if self._postgresql.major_version >= 90400 and self._schedule_load_slots:
            replication_slots: Dict[str, Dict[str, Any]] = {}
            pg_wal_lsn_diff = f"pg_catalog.pg_{self._postgresql.wal_name}_{self._postgresql.lsn_name}_diff"
            extra = f", catalog_xmin, {pg_wal_lsn_diff}(confirmed_flush_lsn, '0/0')::bigint" \
                if self._postgresql.major_version >= 100000 else ""
            skip_temp_slots = ' WHERE NOT temporary' if self._postgresql.major_version >= 100000 else ''
            for r in self._query(f"SELECT slot_name, slot_type, {pg_wal_lsn_diff}(restart_lsn, '0/0')::bigint, plugin,"
                                 f" database, datoid{extra} FROM pg_catalog.pg_replication_slots{skip_temp_slots}"):
                value = {'type': r[1]}
                if r[1] == 'logical':
                    value.update(plugin=r[3], database=r[4], datoid=r[5])
                    if self._postgresql.major_version >= 100000:
                        value.update(catalog_xmin=r[6], confirmed_flush_lsn=r[7])
                else:
                    value['restart_lsn'] = r[2]
                replication_slots[r[0]] = value
            self._replication_slots = replication_slots
            self._schedule_load_slots = False
            if self._force_readiness_check:
                self._logical_slots_processing_queue = {n: None for n, v in replication_slots.items()
                                                        if v['type'] == 'logical'}
                self._force_readiness_check = False

    def ignore_replication_slot(self, cluster: Cluster, name: str) -> bool:
        """Check if slot *name* should not be managed by Patroni.

        :param cluster: cluster state information object.
        :param name: name of the slot to ignore

        :returns: ``True`` if slot *name* matches any slot specified in ``ignore_slots`` configuration,
            otherwise will pass through and return result of :meth:`CitusHandler.ignore_replication_slot`.
""" slot = self._replication_slots[name] if cluster.config: for matcher in cluster.config.ignore_slots_matchers: if ( (matcher.get("name") is None or matcher["name"] == name) and all(not matcher.get(a) or matcher[a] == slot.get(a) for a in ('database', 'plugin', 'type')) ): return True return self._postgresql.citus_handler.ignore_replication_slot(slot) def drop_replication_slot(self, name: str) -> Tuple[bool, bool]: """Drop a named slot from Postgres. :param name: name of the slot to be dropped. :returns: a tuple of ``active`` and ``dropped``. ``active`` is ``True`` if the slot is active, ``dropped`` is ``True`` if the slot was successfully dropped. If the slot was not found return ``False`` for both. """ rows = self._query(('WITH slots AS (SELECT slot_name, active' ' FROM pg_catalog.pg_replication_slots WHERE slot_name = %s),' ' dropped AS (SELECT pg_catalog.pg_drop_replication_slot(slot_name),' ' true AS dropped FROM slots WHERE not active) ' 'SELECT active, COALESCE(dropped, false) FROM slots' ' FULL OUTER JOIN dropped ON true'), name) return (rows[0][0], rows[0][1]) if rows else (False, False) def _drop_incorrect_slots(self, cluster: Cluster, slots: Dict[str, Any], paused: bool) -> None: """Compare required slots and configured as permanent slots with those found, dropping extraneous ones. .. note:: Slots that are not contained in *slots* will be dropped. Slots can be filtered out with ``ignore_slots`` configuration. Slots that have matching names but do not match attributes in *slots* will also be dropped. :param cluster: cluster state information object. :param slots: dictionary of desired slot names as keys with slot attributes as a dictionary value, if known. :param paused: ``True`` if the patroni cluster is currently in a paused state. """ # drop old replication slots which are not presented in desired slots. 
for name in set(self._replication_slots) - set(slots): if not paused and not self.ignore_replication_slot(cluster, name): active, dropped = self.drop_replication_slot(name) if dropped: logger.info("Dropped unknown replication slot '%s'", name) else: self._schedule_load_slots = True if active: logger.debug("Unable to drop unknown replication slot '%s', slot is still active", name) else: logger.error("Failed to drop replication slot '%s'", name) # drop slots with matching names but attributes that do not match, e.g. `plugin` or `database`. for name, value in slots.items(): if name in self._replication_slots and not compare_slots(value, self._replication_slots[name]): logger.info("Trying to drop replication slot '%s' because value is changing from %s to %s", name, self._replication_slots[name], value) if self.drop_replication_slot(name) == (False, True): self._replication_slots.pop(name) else: logger.error("Failed to drop replication slot '%s'", name) self._schedule_load_slots = True def _ensure_physical_slots(self, slots: Dict[str, Any]) -> None: """Create or advance physical replication *slots*. Any failures are logged and do not interrupt creation of all *slots*. :param slots: A dictionary mapping slot name to slot attributes. This method only considers a slot if the value is a dictionary with the key ``type`` and a value of ``physical``. 
""" immediately_reserve = ', true' if self._postgresql.major_version >= 90600 else '' for name, value in slots.items(): if value['type'] != 'physical': continue if name not in self._replication_slots: try: self._query(f"SELECT pg_catalog.pg_create_physical_replication_slot(%s{immediately_reserve})" f" WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_replication_slots" f" WHERE slot_type = 'physical' AND slot_name = %s)", name, name) except Exception: logger.exception("Failed to create physical replication slot '%s'", name) self._schedule_load_slots = True elif self._postgresql.can_advance_slots and self._replication_slots[name]['type'] == 'physical': value['restart_lsn'] = self._replication_slots[name]['restart_lsn'] lsn = value.get('lsn') if lsn and lsn > value['restart_lsn']: # The slot has feedback in DCS and needs to be advanced try: lsn = format_lsn(lsn) self._query("SELECT pg_catalog.pg_replication_slot_advance(%s, %s)", name, lsn) except Exception as exc: logger.error("Error while advancing replication slot %s to position '%s': %r", name, lsn, exc) @contextmanager def get_local_connection_cursor(self, **kwargs: Any) -> Iterator[Union['cursor', 'Cursor[Any]']]: """Create a new database connection to local server. Create a non-blocking connection cursor to avoid the situation where an execution of the query of ``pg_replication_slot_advance`` takes longer than the timeout on a HA loop, which could cause a false failure state. :param kwargs: Any keyword arguments to pass to :func:`psycopg.connect`. :yields: connection cursor object, note implementation varies depending on version of :mod:`psycopg`. """ conn_kwargs = {**self._postgresql.connection_pool.conn_kwargs, **kwargs} with get_connection_cursor(**conn_kwargs) as cur: yield cur def _ensure_logical_slots_primary(self, slots: Dict[str, Any]) -> None: """Create any missing logical replication *slots* on the primary. 
If the logical slot already exists, copy state information into the replication slots structure stored in the class instance. :param slots: Slots that should exist are supplied in a dictionary, mapping slot name to any attributes. The method will only consider slots that have a value that is a dictionary with a key ``type`` with a value that is ``logical``. """ # Group logical slots to be created by database name logical_slots: Dict[str, Dict[str, Dict[str, Any]]] = defaultdict(dict) for name, value in slots.items(): if value['type'] == 'logical': if self._replication_slots.get(name, {}).get('datoid'): self._copy_items(self._replication_slots[name], value) else: logical_slots[value['database']][name] = value # Create new logical slots for database, values in logical_slots.items(): with self.get_local_connection_cursor(dbname=database) as cur: for name, value in values.items(): try: cur.execute("SELECT pg_catalog.pg_create_logical_replication_slot(%s, %s)" " WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_replication_slots" " WHERE slot_type = 'logical' AND slot_name = %s)", (name, value['plugin'], name)) except Exception as e: logger.error("Failed to create logical replication slot '%s' plugin='%s': %r", name, value['plugin'], e) slots.pop(name) self._schedule_load_slots = True def schedule_advance_slots(self, slots: Dict[str, Dict[str, int]]) -> Tuple[bool, List[str]]: """Wrapper to ensure slots advance daemon thread is started if not already. :param slots: dictionary containing slot information. :return: tuple with the result of the scheduling of slot advancement: ``failed`` and list of slots to copy. """ if not self._advance: self._advance = SlotsAdvanceThread(self) return self._advance.schedule(slots) def _ensure_logical_slots_replica(self, slots: Dict[str, Any]) -> List[str]: """Update logical *slots* on replicas. If the logical slot already exists, copy state information into the replication slots structure stored in the class instance. 
Slots that exist are also advanced if their ``confirmed_flush_lsn`` is greater than the stored state of the slot. As logical slots can only be created when the primary is available, pass the list of slots that need to be copied back to the caller. They will be created on replicas with :meth:`SlotsHandler.copy_logical_slots`. :param slots: A dictionary mapping slot name to slot attributes. This method only considers a slot if the value is a dictionary with the key ``type`` and a value of ``logical``. :returns: list of slots to be copied from the primary. """ # Group logical slots to be advanced by database name advance_slots: Dict[str, Dict[str, int]] = defaultdict(dict) create_slots: List[str] = [] # Collect logical slots to be created on the replica for name, value in slots.items(): if value['type'] != 'logical': continue # If the logical already exists, copy some information about it into the original structure if name in self._replication_slots and compare_slots(value, self._replication_slots[name]): self._copy_items(self._replication_slots[name], value) if 'lsn' in value and value['confirmed_flush_lsn'] < value['lsn']: # The slot has feedback in DCS # Skip slots that don't need to be advanced advance_slots[value['database']][name] = value['lsn'] elif name not in self._replication_slots and 'lsn' in value: # We want to copy only slots with feedback in a DCS create_slots.append(name) # Slots to be copied from the primary should be removed from the *slots* structure, # otherwise Patroni falsely assumes that they already exist. for name in create_slots: slots.pop(name) error, copy_slots = self.schedule_advance_slots(advance_slots) if error: self._schedule_load_slots = True return create_slots + copy_slots def sync_replication_slots(self, cluster: Cluster, nofailover: bool, replicatefrom: Optional[str] = None, paused: bool = False) -> List[str]: """During the HA loop read, check and alter replication slots found in the cluster. 
Read physical and logical slots from ``pg_replication_slots``, then compare to those configured in the DCS. Drop any slots that do not match those required by configuration and are not configured as permanent. Create any missing physical slots, or advance their position according to feedback stored in DCS. If we are the primary then create logical slots, otherwise if logical slots are known and active create them on replica nodes by copying slot files from the primary. :param cluster: object containing stateful information for the cluster. :param nofailover: ``True`` if this node has been tagged to not be a failover candidate. :param replicatefrom: the tag containing the node to replicate from. :param paused: ``True`` if the cluster is in maintenance mode. :returns: list of logical replication slots names that should be copied from the primary. """ ret = [] if self._postgresql.major_version >= 90400 and self._postgresql.global_config and cluster.config: try: self.load_replication_slots() slots = cluster.get_replication_slots( self._postgresql.name, self._postgresql.role, nofailover, self._postgresql.major_version, is_standby_cluster=self._postgresql.global_config.is_standby_cluster, show_error=True) self._drop_incorrect_slots(cluster, slots, paused) self._ensure_physical_slots(slots) if self._postgresql.is_primary(): self._logical_slots_processing_queue.clear() self._ensure_logical_slots_primary(slots) else: self.check_logical_slots_readiness(cluster, replicatefrom) ret = self._ensure_logical_slots_replica(slots) self._replication_slots = slots except Exception: logger.exception('Exception when changing replication slots') self._schedule_load_slots = True return ret @contextmanager def _get_leader_connection_cursor(self, leader: Leader) -> Iterator[Union['cursor', 'Cursor[Any]']]: """Create a new database connection to the leader. .. note:: Uses rewind user credentials because it has enough permissions to read files from PGDATA. 
Sets the options ``connect_timeout`` to ``3`` and ``statement_timeout`` to ``2000``. :param leader: object with information on the leader :yields: connection cursor object, note implementation varies depending on version of ``psycopg``. """ conn_kwargs = leader.conn_kwargs(self._postgresql.config.rewind_credentials) conn_kwargs['dbname'] = self._postgresql.database with get_connection_cursor(connect_timeout=3, options="-c statement_timeout=2000", **conn_kwargs) as cur: yield cur def check_logical_slots_readiness(self, cluster: Cluster, replicatefrom: Optional[str]) -> bool: """Determine whether all known logical slots are synchronised from the leader. 1) Retrieve the current ``catalog_xmin`` value for the physical slot from the cluster leader, and 2) using previously stored list of "unready" logical slots, those which have yet to be checked hence have no stored slot attributes, 3) store logical slot ``catalog_xmin`` when the physical slot ``catalog_xmin`` becomes valid. :param cluster: object containing stateful information for the cluster. :param replicatefrom: name of the member that should be used to replicate from. :returns: ``False`` if any issue while checking logical slots readiness, ``True`` otherwise. 
""" catalog_xmin = None if self._logical_slots_processing_queue and cluster.leader: slot_name = cluster.get_my_slot_name_on_primary(self._postgresql.name, replicatefrom) try: with self._get_leader_connection_cursor(cluster.leader) as cur: cur.execute("SELECT slot_name, catalog_xmin FROM pg_catalog.pg_get_replication_slots()" " WHERE NOT pg_catalog.pg_is_in_recovery() AND slot_name = ANY(%s)", ([n for n, v in self._logical_slots_processing_queue.items() if v is None] + [slot_name],)) slots = {row[0]: row[1] for row in cur} if slot_name not in slots: logger.warning('Physical slot %s does not exist on the primary', slot_name) return False catalog_xmin = slots.pop(slot_name) except Exception as e: logger.error("Failed to check %s physical slot on the primary: %r", slot_name, e) return False if not self._update_pending_logical_slot_primary(slots, catalog_xmin): return False # since `catalog_xmin` isn't valid further checks don't make any sense self._ready_logical_slots(catalog_xmin) return True def _update_pending_logical_slot_primary(self, slots: Dict[str, Any], catalog_xmin: Optional[int] = None) -> bool: """Store pending logical slot information for ``catalog_xmin`` on the primary. Remember ``catalog_xmin`` of logical slots on the primary when ``catalog_xmin`` of the physical slot became valid. Logical slots on replica will be safe to use after promote when ``catalog_xmin`` of the physical slot overtakes these values. :param slots: dictionary of slot information from the primary :param catalog_xmin: ``catalog_xmin`` of the physical slot used by this replica to stream changes from primary. :returns: ``False`` if any issue was faced while processing, ``True`` otherwise. 
""" if catalog_xmin is not None: for name, value in slots.items(): self._logical_slots_processing_queue[name] = value return True # Replica isn't streaming or the hot_standby_feedback isn't enabled try: if not self._query("SELECT pg_catalog.current_setting('hot_standby_feedback')::boolean")[0][0]: logger.error('Logical slot failover requires "hot_standby_feedback". Please check postgresql.auto.conf') except Exception as e: logger.error('Failed to check the hot_standby_feedback setting: %r', e) return False def _ready_logical_slots(self, primary_physical_catalog_xmin: Optional[int] = None) -> None: """Ready logical slots by comparing primary physical slot ``catalog_xmin`` to logical ``catalog_xmin``. The logical slot on a replica is safe to use when the physical replica slot on the primary: 1. has a nonzero/non-null ``catalog_xmin`` represented by ``primary_physical_xmin``. 2. has a ``catalog_xmin`` that is not newer (greater) than the ``catalog_xmin`` of any slot on the standby 3. overtook the ``catalog_xmin`` of remembered values of logical slots on the primary. :param primary_physical_catalog_xmin: is the value retrieved from ``pg_catalog.pg_get_replication_slots()`` for the physical replication slot on the primary. """ # Make a copy of processing queue keys as a list as the queue dictionary is modified inside the loop. 
for name in list(self._logical_slots_processing_queue): primary_logical_catalog_xmin = self._logical_slots_processing_queue[name] standby_logical_slot = self._replication_slots.get(name, {}) standby_logical_catalog_xmin = standby_logical_slot.get('catalog_xmin', 0) if TYPE_CHECKING: # pragma: no cover assert primary_logical_catalog_xmin is not None if ( not standby_logical_slot or primary_physical_catalog_xmin is not None and primary_logical_catalog_xmin <= primary_physical_catalog_xmin <= standby_logical_catalog_xmin ): del self._logical_slots_processing_queue[name] if standby_logical_slot: logger.info('Logical slot %s is safe to be used after a failover', name) def copy_logical_slots(self, cluster: Cluster, create_slots: List[str]) -> None: """Create logical replication slots on standby nodes. :param cluster: object containing stateful information for the cluster. :param create_slots: list of slot names to copy from the primary. """ leader = cluster.leader if not leader: return slots = cluster.get_replication_slots(self._postgresql.name, 'replica', False, self._postgresql.major_version) copy_slots: Dict[str, Dict[str, Any]] = {} with self._get_leader_connection_cursor(leader) as cur: try: cur.execute("SELECT slot_name, slot_type, datname, plugin, catalog_xmin, " "pg_catalog.pg_wal_lsn_diff(confirmed_flush_lsn, '0/0')::bigint, " "pg_catalog.pg_read_binary_file('pg_replslot/' || slot_name || '/state')" " FROM pg_catalog.pg_get_replication_slots() JOIN pg_catalog.pg_database ON datoid = oid" " WHERE NOT pg_catalog.pg_is_in_recovery() AND slot_name = ANY(%s)", (create_slots,)) for r in cur: if r[0] in slots: # slot_name is defined in the global configuration slot = {'type': r[1], 'database': r[2], 'plugin': r[3], 'catalog_xmin': r[4], 'confirmed_flush_lsn': r[5], 'data': r[6]} if compare_slots(slot, slots[r[0]]): copy_slots[r[0]] = slot else: logger.warning('Will not copy the logical slot "%s" due to the configuration mismatch: ' 'configuration=%s, slot on the 
primary=%s', r[0], slots[r[0]], slot) except Exception as e: logger.error("Failed to copy logical slots from the %s via postgresql connection: %r", leader.name, e) if copy_slots and self._postgresql.stop(): pg_perm.set_permissions_from_data_directory(self._postgresql.data_dir) for name, value in copy_slots.items(): slot_dir = os.path.join(self.pg_replslot_dir, name) slot_tmp_dir = slot_dir + '.tmp' if os.path.exists(slot_tmp_dir): shutil.rmtree(slot_tmp_dir) os.makedirs(slot_tmp_dir) os.chmod(slot_tmp_dir, pg_perm.dir_create_mode) fsync_dir(slot_tmp_dir) slot_filename = os.path.join(slot_tmp_dir, 'state') with open(slot_filename, 'wb') as f: os.chmod(slot_filename, pg_perm.file_create_mode) f.write(value['data']) f.flush() os.fsync(f.fileno()) if os.path.exists(slot_dir): shutil.rmtree(slot_dir) os.rename(slot_tmp_dir, slot_dir) os.chmod(slot_dir, pg_perm.dir_create_mode) fsync_dir(slot_dir) self._logical_slots_processing_queue[name] = None fsync_dir(self.pg_replslot_dir) self._postgresql.start() def schedule(self, value: Optional[bool] = None) -> None: """Schedule the loading of slot information from the database. :param value: the optional value can be used to unschedule if set to ``False`` or force it to be ``True``. If it is omitted the value will be ``True`` if this PostgreSQL node supports slot replication. """ if value is None: value = self._postgresql.major_version >= 90400 self._schedule_load_slots = self._force_readiness_check = value def on_promote(self) -> None: """Entry point from HA cycle used when a standby node is to be promoted to primary. .. note:: If logical replication slot synchronisation is enabled then slot advancement will be triggered. If any logical slots that were copied are yet to be confirmed as ready a warning message will be logged. 
""" if self._advance: self._advance.on_promote() if self._logical_slots_processing_queue: logger.warning('Logical replication slots that might be unsafe to use after promote: %s', set(self._logical_slots_processing_queue)) patroni-3.2.2/patroni/postgresql/sync.py000066400000000000000000000364051455170150700204140ustar00rootroot00000000000000import logging import re import time from copy import deepcopy from typing import Collection, List, NamedTuple, Tuple, TYPE_CHECKING from ..collections import CaseInsensitiveDict, CaseInsensitiveSet from ..dcs import Cluster from ..psycopg import quote_ident as _quote_ident if TYPE_CHECKING: # pragma: no cover from . import Postgresql logger = logging.getLogger(__name__) SYNC_STANDBY_NAME_RE = re.compile(r'^[A-Za-z_][A-Za-z_0-9\$]*$') SYNC_REP_PARSER_RE = re.compile(r""" (?P [fF][iI][rR][sS][tT] ) | (?P [aA][nN][yY] ) | (?P \s+ ) | (?P [A-Za-z_][A-Za-z_0-9\$]* ) | (?P " (?: [^"]+ | "" )* " ) | (?P [*] ) | (?P \d+ ) | (?P , ) | (?P \( ) | (?P \) ) | (?P . ) """, re.X) def quote_ident(value: str) -> str: """Very simplified version of `psycopg` :func:`quote_ident` function.""" return value if SYNC_STANDBY_NAME_RE.match(value) else _quote_ident(value) class _SSN(NamedTuple): """class representing "synchronous_standby_names" value after parsing. :ivar sync_type: possible values: 'off', 'priority', 'quorum' :ivar has_star: is set to `True` if "synchronous_standby_names" contains '*' :ivar num: how many nodes are required to be synchronous :ivar members: collection of standby names listed in "synchronous_standby_names" """ sync_type: str has_star: bool num: int members: CaseInsensitiveSet _EMPTY_SSN = _SSN('off', False, 0, CaseInsensitiveSet()) def parse_sync_standby_names(value: str) -> _SSN: """Parse postgresql synchronous_standby_names to constituent parts. 
:param value: the value of `synchronous_standby_names` :returns: :class:`_SSN` object :raises `ValueError`: if the configuration value can not be parsed >>> parse_sync_standby_names('').sync_type 'off' >>> parse_sync_standby_names('FiRsT').sync_type 'priority' >>> 'first' in parse_sync_standby_names('FiRsT').members True >>> set(parse_sync_standby_names('"1"').members) {'1'} >>> parse_sync_standby_names(' a , b ').members == {'a', 'b'} True >>> parse_sync_standby_names(' a , b ').num 1 >>> parse_sync_standby_names('ANY 4("a",*,b)').has_star True >>> parse_sync_standby_names('ANY 4("a",*,b)').num 4 >>> parse_sync_standby_names('1') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError: Unparseable synchronous_standby_names value >>> parse_sync_standby_names('a,') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError: Unparseable synchronous_standby_names value >>> parse_sync_standby_names('ANY 4("a" b,"c c")') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError: Unparseable synchronous_standby_names value >>> parse_sync_standby_names('FIRST 4("a",)') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError: Unparseable synchronous_standby_names value >>> parse_sync_standby_names('2 (,)') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... 
ValueError: Unparseable synchronous_standby_names value """ tokens = [(m.lastgroup, m.group(0), m.start()) for m in SYNC_REP_PARSER_RE.finditer(value) if m.lastgroup != 'space'] if not tokens: return deepcopy(_EMPTY_SSN) if [t[0] for t in tokens[0:3]] == ['any', 'num', 'parenstart'] and tokens[-1][0] == 'parenend': sync_type = 'quorum' num = int(tokens[1][1]) synclist = tokens[3:-1] elif [t[0] for t in tokens[0:3]] == ['first', 'num', 'parenstart'] and tokens[-1][0] == 'parenend': sync_type = 'priority' num = int(tokens[1][1]) synclist = tokens[3:-1] elif [t[0] for t in tokens[0:2]] == ['num', 'parenstart'] and tokens[-1][0] == 'parenend': sync_type = 'priority' num = int(tokens[0][1]) synclist = tokens[2:-1] else: sync_type = 'priority' num = 1 synclist = tokens has_star = False members = CaseInsensitiveSet() for i, (a_type, a_value, a_pos) in enumerate(synclist): if i % 2 == 1: # odd elements are supposed to be commas if len(synclist) == i + 1: # except the last token raise ValueError("Unparseable synchronous_standby_names value %r: Unexpected token %s %r at %d" % (value, a_type, a_value, a_pos)) elif a_type != 'comma': raise ValueError("Unparseable synchronous_standby_names value %r: ""Got token %s %r while" " expecting comma at %d" % (value, a_type, a_value, a_pos)) elif a_type in {'ident', 'first', 'any'}: members.add(a_value) elif a_type == 'star': members.add(a_value) has_star = True elif a_type == 'dquot': members.add(a_value[1:-1].replace('""', '"')) else: raise ValueError("Unparseable synchronous_standby_names value %r: Unexpected token %s %r at %d" % (value, a_type, a_value, a_pos)) return _SSN(sync_type, has_star, num, members) class _Replica(NamedTuple): """Class representing a single replica that is eligible to be synchronous. Attributes are taken from ``pg_stat_replication`` view and respective ``Cluster.members``. :ivar pid: PID of walsender process. :ivar application_name: matches with the ``Member.name``. 
:ivar sync_state: possible values are: ``async``, ``potential``, ``quorum``, and ``sync``. :ivar lsn: ``write_lsn``, ``flush_lsn``, or ``replay_lsn``, depending on the value of ``synchronous_commit`` GUC. :ivar nofailover: whether the corresponding member has ``nofailover`` tag set to ``True``. """ pid: int application_name: str sync_state: str lsn: int nofailover: bool class _ReplicaList(List[_Replica]): """A collection of :class:``_Replica`` objects. Values are reverse ordered by ``_Replica.sync_state`` and ``_Replica.lsn``. That is, first there will be replicas that have ``sync_state`` == ``sync``, even if they are not the most up-to-date in term of write/flush/replay LSN. It helps to keep the result of chosing new synchronous nodes consistent in case if a synchronous standby member is slowed down OR async node is receiving changes faster than the sync member. Such cases would trigger sync standby member swapping, but only if lag on this member is exceeding a threshold (``maximum_lag_on_syncnode``). :ivar max_lsn: maximum value of ``_Replica.lsn`` among all values. In case if there is just one element in the list we take value of ``pg_current_wal_flush_lsn()``. """ def __init__(self, postgresql: 'Postgresql', cluster: Cluster) -> None: """Create :class:``_ReplicaList`` object. :param postgresql: reference to :class:``Postgresql`` object. :param cluster: currently known cluster state from DCS. """ super().__init__() # We want to prioritize candidates based on `write_lsn``, ``flush_lsn``, or ``replay_lsn``. # Which column exactly to pick depends on the values of ``synchronous_commit`` GUC. sort_col = { 'remote_apply': 'replay', 'remote_write': 'write' }.get(postgresql.synchronous_commit(), 'flush') + '_lsn' members = CaseInsensitiveDict({m.name: m for m in cluster.members}) for row in postgresql.pg_stat_replication(): member = members.get(row['application_name']) # We want to consider only rows from ``pg_stat_replication` that: # 1. 
are known to be streaming (write/flush/replay LSN are not NULL). # 2. can be mapped to a ``Member`` of the ``Cluster``: # a. ``Member`` doesn't have ``nosync`` tag set; # b. PostgreSQL on the member is known to be running and accepting client connections. if member and row[sort_col] is not None and member.is_running and not member.nosync: self.append(_Replica(row['pid'], row['application_name'], row['sync_state'], row[sort_col], bool(member.nofailover))) # Prefer replicas that are in state ``sync`` and with higher values of ``write``/``flush``/``replay`` LSN. self.sort(key=lambda r: (r.sync_state, r.lsn), reverse=True) self.max_lsn = max(self, key=lambda x: x.lsn).lsn if len(self) > 1 else postgresql.last_operation() class SyncHandler(object): """Class responsible for working with the `synchronous_standby_names`. Sync standbys are chosen based on their state in `pg_stat_replication`. When `synchronous_standby_names` is changed we memorize the `_primary_flush_lsn` and the `current_state()` method will count newly added names as "sync" only when they reached memorized LSN and also reported as "sync" by `pg_stat_replication`""" def __init__(self, postgresql: 'Postgresql') -> None: self._postgresql = postgresql self._synchronous_standby_names = '' # last known value of synchronous_standby_names self._ssn_data = deepcopy(_EMPTY_SSN) self._primary_flush_lsn = 0 # "sync" replication connections, that were verified to reach self._primary_flush_lsn at some point self._ready_replicas = CaseInsensitiveDict({}) # keys: member names, values: connection pids def _handle_synchronous_standby_names_change(self) -> None: """Handles changes of "synchronous_standby_names" GUC. If "synchronous_standby_names" was changed, we need to check that newly added replicas have reached `self._primary_flush_lsn`. Only after that they could be counted as synchronous. 
""" synchronous_standby_names = self._postgresql.synchronous_standby_names() if synchronous_standby_names == self._synchronous_standby_names: return self._synchronous_standby_names = synchronous_standby_names try: self._ssn_data = parse_sync_standby_names(synchronous_standby_names) except ValueError as e: logger.warning('%s', e) self._ssn_data = deepcopy(_EMPTY_SSN) # Invalidate cache of "sync" connections for app_name in list(self._ready_replicas.keys()): if app_name not in self._ssn_data.members: del self._ready_replicas[app_name] # Newly connected replicas will be counted as sync only when reached self._primary_flush_lsn self._primary_flush_lsn = self._postgresql.last_operation() # Ensure some WAL traffic to move replication self._postgresql.query("""DO $$ BEGIN SET local synchronous_commit = 'off'; PERFORM * FROM pg_catalog.txid_current(); END;$$""") self._postgresql.reset_cluster_info_state(None) # Reset internal cache to query fresh values def _process_replica_readiness(self, cluster: Cluster, replica_list: _ReplicaList) -> None: """Flags replicas as truly "synchronous" when they have caught up with ``_primary_flush_lsn``. :param cluster: current cluster topology from DCS :param replica_list: collection of replicas that we want to evaluate. """ for replica in replica_list: # if standby name is listed in the /sync key we can count it as synchronous, otherwise # it becomes really synchronous when sync_state = 'sync' and it is known that it managed to catch up if replica.application_name not in self._ready_replicas\ and replica.application_name in self._ssn_data.members\ and (cluster.sync.matches(replica.application_name) or replica.sync_state == 'sync' and replica.lsn >= self._primary_flush_lsn): self._ready_replicas[replica.application_name] = replica.pid def current_state(self, cluster: Cluster) -> Tuple[CaseInsensitiveSet, CaseInsensitiveSet]: """Find the best candidates to be the synchronous standbys. 
Current synchronous standby is always preferred, unless it has disconnected or does not want to be a synchronous standby any longer. Standbys are selected based on values from the global configuration: - `maximum_lag_on_syncnode`: would help swapping unhealthy sync replica in case if it stops responding (or hung). Please set the value high enough so it won't unncessarily swap sync standbys during high loads. Any value less or equal of 0 keeps the behavior backward compatible. Please note that it will not also swap sync standbys in case where all replicas are hung. - `synchronous_node_count`: controlls how many nodes should be set as synchronous. :returns: tuple of candidates :class:`CaseInsensitiveSet` and synchronous standbys :class:`CaseInsensitiveSet`. """ self._handle_synchronous_standby_names_change() replica_list = _ReplicaList(self._postgresql, cluster) self._process_replica_readiness(cluster, replica_list) if TYPE_CHECKING: # pragma: no cover assert self._postgresql.global_config is not None sync_node_count = self._postgresql.global_config.synchronous_node_count\ if self._postgresql.supports_multiple_sync else 1 sync_node_maxlag = self._postgresql.global_config.maximum_lag_on_syncnode candidates = CaseInsensitiveSet() sync_nodes = CaseInsensitiveSet() # Prefer members without nofailover tag. We are relying on the fact that sorts are guaranteed to be stable. for replica in sorted(replica_list, key=lambda x: x.nofailover): if sync_node_maxlag <= 0 or replica_list.max_lsn - replica.lsn <= sync_node_maxlag: candidates.add(replica.application_name) if replica.sync_state == 'sync' and replica.application_name in self._ready_replicas: sync_nodes.add(replica.application_name) if len(candidates) >= sync_node_count: break return candidates, sync_nodes def set_synchronous_standby_names(self, sync: Collection[str]) -> None: """Constructs and sets "synchronous_standby_names" GUC value. 
:param sync: set of nodes to sync to """ has_asterisk = '*' in sync if has_asterisk: sync = ['*'] else: sync = [quote_ident(x) for x in sync] if self._postgresql.supports_multiple_sync and len(sync) > 1: sync_param = '{0} ({1})'.format(len(sync), ','.join(sync)) else: sync_param = next(iter(sync), None) if not (self._postgresql.config.set_synchronous_standby_names(sync_param) and self._postgresql.state == 'running' and self._postgresql.is_primary()) or has_asterisk: return time.sleep(0.1) # Usualy it takes 1ms to reload postgresql.conf, but we will give it 100ms # Reset internal cache to query fresh values self._postgresql.reset_cluster_info_state(None) # timeline == 0 -- indicates that this is the replica if self._postgresql.get_primary_timeline() > 0: self._handle_synchronous_standby_names_change() patroni-3.2.2/patroni/postgresql/validator.py000066400000000000000000000517341455170150700214270ustar00rootroot00000000000000import abc from copy import deepcopy import logging import os import yaml from typing import Any, Dict, Iterator, List, MutableMapping, Optional, Tuple, Type, Union from ..collections import CaseInsensitiveDict, CaseInsensitiveSet from ..exceptions import PatroniException from ..utils import parse_bool, parse_int, parse_real logger = logging.getLogger(__name__) class _Transformable(abc.ABC): def __init__(self, version_from: int, version_till: Optional[int] = None) -> None: self.__version_from = version_from self.__version_till = version_till @classmethod def get_subclasses(cls) -> Iterator[Type['_Transformable']]: """Recursively get all subclasses of :class:`_Transformable`. :yields: each subclass of :class:`_Transformable`. 
""" for subclass in cls.__subclasses__(): yield from subclass.get_subclasses() yield subclass @property def version_from(self) -> int: return self.__version_from @property def version_till(self) -> Optional[int]: return self.__version_till @abc.abstractmethod def transform(self, name: str, value: Any) -> Optional[Any]: """Verify that provided value is valid. :param name: GUC's name :param value: GUC's value :returns: the value (sometimes clamped) or ``None`` if the value isn't valid """ class Bool(_Transformable): def transform(self, name: str, value: Any) -> Optional[Any]: if parse_bool(value) is not None: return value logger.warning('Removing bool parameter=%s from the config due to the invalid value=%s', name, value) class Number(_Transformable): def __init__(self, *, version_from: int, version_till: Optional[int] = None, min_val: Union[int, float], max_val: Union[int, float], unit: Optional[str] = None) -> None: super(Number, self).__init__(version_from, version_till) self.__min_val = min_val self.__max_val = max_val self.__unit = unit @property def min_val(self) -> Union[int, float]: return self.__min_val @property def max_val(self) -> Union[int, float]: return self.__max_val @property def unit(self) -> Optional[str]: return self.__unit @staticmethod @abc.abstractmethod def parse(value: Any, unit: Optional[str]) -> Optional[Any]: """Convert provided value to unit.""" def transform(self, name: str, value: Any) -> Union[int, float, None]: num_value = self.parse(value, self.unit) if num_value is not None: if num_value < self.min_val: logger.warning('Value=%s of parameter=%s is too low, increasing to %s%s', value, name, self.min_val, self.unit or '') return self.min_val if num_value > self.max_val: logger.warning('Value=%s of parameter=%s is too big, decreasing to %s%s', value, name, self.max_val, self.unit or '') return self.max_val return value logger.warning('Removing %s parameter=%s from the config due to the invalid value=%s', self.__class__.__name__.lower(), 
name, value) class Integer(Number): @staticmethod def parse(value: Any, unit: Optional[str]) -> Optional[int]: return parse_int(value, unit) class Real(Number): @staticmethod def parse(value: Any, unit: Optional[str]) -> Optional[float]: return parse_real(value, unit) class Enum(_Transformable): def __init__(self, *, version_from: int, version_till: Optional[int] = None, possible_values: Tuple[str, ...]) -> None: super(Enum, self).__init__(version_from, version_till) self.__possible_values = possible_values @property def possible_values(self) -> Tuple[str, ...]: return self.__possible_values def transform(self, name: str, value: Optional[Any]) -> Optional[Any]: if str(value).lower() in self.possible_values: return value logger.warning('Removing enum parameter=%s from the config due to the invalid value=%s', name, value) class EnumBool(Enum): def transform(self, name: str, value: Optional[Any]) -> Optional[Any]: if parse_bool(value) is not None: return value return super(EnumBool, self).transform(name, value) class String(_Transformable): def transform(self, name: str, value: Optional[Any]) -> Optional[Any]: return value # Format: # key - parameter name # value - variable length tuple of `_Transformable` objects. Each object in the tuple represents a different # validation of the GUC across postgres versions. If a GUC validation has never changed over time, then it will # have a single object in the tuple. For example, `password_encryption` used to be a boolean GUC up to Postgres # 10, at which point it started being an enum. In that case the value of `password_encryption` would be a tuple # of 2 `_Transformable` objects (`Bool` and `Enum`, respectively), each one reprensenting a different # validation rule. 
parameters = CaseInsensitiveDict() recovery_parameters = CaseInsensitiveDict() class ValidatorFactoryNoType(PatroniException): """Raised when a validator spec misses a type.""" class ValidatorFactoryInvalidType(PatroniException): """Raised when a validator spec contains an invalid type.""" class ValidatorFactoryInvalidSpec(PatroniException): """Raised when a validator spec contains an invalid set of attributes.""" class ValidatorFactory: """Factory class used to build Patroni validator objects based on the given specs.""" TYPES: Dict[str, Type[_Transformable]] = {cls.__name__: cls for cls in _Transformable.get_subclasses()} def __new__(cls, validator: Dict[str, Any]) -> _Transformable: """Parse a given Postgres GUC *validator* into the corresponding Patroni validator object. :param validator: a validator spec for a given parameter. It usually comes from a parsed YAML file. :returns: the Patroni validator object that corresponds to the specification found in *validator*. :raises: :class:`ValidatorFactoryNoType`: if *validator* contains no ``type`` key. :class:`ValidatorFactoryInvalidType`: if ``type`` key from *validator* contains an invalid value. :class:`ValidatorFactoryInvalidSpec`: if *validator* contains an invalid set of attributes for the given ``type``. 
:Example: If a given validator was defined as follows in the YAML file: ```yaml - type: String version_from: 90300 version_till: null ``` Then this method would receive *validator* as: ```python { 'type': 'String', 'version_from': 90300, 'version_till': None } ``` And this method would return a :class:`String`: ```python String(90300, None) ``` """ validator = deepcopy(validator) try: type_ = validator.pop('type') except KeyError as exc: raise ValidatorFactoryNoType('Validator contains no type.') from exc if type_ not in cls.TYPES: raise ValidatorFactoryInvalidType(f'Unexpected validator type: `{type_}`.') for key, value in validator.items(): # :func:`_transform_parameter_value` expects :class:`tuple` instead of :class:`list` if isinstance(value, list): tmp_value: List[Any] = value validator[key] = tuple(tmp_value) try: return cls.TYPES[type_](**validator) except Exception as exc: raise ValidatorFactoryInvalidSpec( f'Failed to parse `{type_}` validator (`{validator}`): `{str(exc)}`.') from exc def _get_postgres_guc_validators(config: Dict[str, Any], parameter: str) -> Tuple[_Transformable, ...]: """Get all validators of *parameter* from *config*. Loop over all validators specs of *parameter* and return them parsed as Patroni validators. :param config: Python object corresponding to an YAML file, with values of either ``parameters`` or ``recovery_parameters`` key. :param parameter: name of the parameter found under *config* which validators should be parsed and returned. :rtype: yields any exception that is faced while parsing a validator spec into a Patroni validator object. 
""" validators: List[_Transformable] = [] for validator_spec in config.get(parameter, []): try: validator = ValidatorFactory(validator_spec) validators.append(validator) except (ValidatorFactoryNoType, ValidatorFactoryInvalidType, ValidatorFactoryInvalidSpec) as exc: logger.warning('Faced an issue while parsing a validator for parameter `%s`: `%r`', parameter, exc) return tuple(validators) class InvalidGucValidatorsFile(PatroniException): """Raised when reading or parsing of a YAML file faces an issue.""" def _read_postgres_gucs_validators_file(file: str) -> Dict[str, Any]: """Read an YAML file and return the corresponding Python object. :param file: path to the file to be read. It is expected to be encoded with ``UTF-8``, and to be a YAML document. :returns: the YAML content parsed into a Python object. If any issue is faced while reading/parsing the file, then return ``None``. :raises: :class:`InvalidGucValidatorsFile`: if faces an issue while reading or parsing *file*. """ try: with open(file, encoding='UTF-8') as stream: return yaml.safe_load(stream) except Exception as exc: raise InvalidGucValidatorsFile( f'Unexpected issue while reading parameters file `{file}`: `{str(exc)}`.') from exc def _load_postgres_gucs_validators() -> None: """Load all Postgres GUC validators from YAML files. Recursively walk through ``available_parameters`` directory and load validators of each found YAML file into ``parameters`` and/or ``recovery_parameters`` variables. Walk through directories in top-down fashion and for each of them: * Sort files by name; * Load validators from YAML files that were found. Any problem faced while reading or parsing files will be logged as a ``WARNING`` by the child function, and the corresponding file or validator will be ignored. By default, Patroni only ships the file ``0_postgres.yml``, which contains Community Postgres GUCs validators, but that behavior can be extended. 
For example: if a vendor wants to add GUC validators to Patroni for covering a custom Postgres build, then they can create their custom YAML files under ``available_parameters`` directory. Each YAML file may contain either or both of these root attributes, here called sections: * ``parameters``: general GUCs that would be written to ``postgresql.conf``; * ``recovery_parameters``: recovery related GUCs that would be written to ``recovery.conf`` (Patroni later writes them to ``postgresql.conf`` if running PG 12 and above). Then, each of these sections, if specified, may contain one or more attributes with the following structure: * key: the name of a GUC; * value: a list of validators. Each item in the list must contain a ``type`` attribute, which must be one among: * ``Bool``; or * ``Integer``; or * ``Real``; or * ``Enum``; or * ``EnumBool``; or * ``String``. Besides the ``type`` attribute, it should also contain all the required attributes as per the corresponding class in this module. .. seealso:: * :class:`Bool`; * :class:`Integer`; * :class:`Real`; * :class:`Enum`; * :class:`EnumBool`; * :class:`String`. :Example: This is a sample content for an YAML file based on Postgres GUCs, showing each of the supported types and sections: .. 
code-block:: yaml parameters: archive_command: - type: String version_from: 90300 version_till: null archive_mode: - type: Bool version_from: 90300 version_till: 90500 - type: EnumBool version_from: 90500 version_till: null possible_values: - always archive_timeout: - type: Integer version_from: 90300 version_till: null min_val: 0 max_val: 1073741823 unit: s autovacuum_vacuum_cost_delay: - type: Integer version_from: 90300 version_till: 120000 min_val: -1 max_val: 100 unit: ms - type: Real version_from: 120000 version_till: null min_val: -1 max_val: 100 unit: ms client_min_messages: - type: Enum version_from: 90300 version_till: null possible_values: - debug5 - debug4 - debug3 - debug2 - debug1 - log - notice - warning - error recovery_parameters: archive_cleanup_command: - type: String version_from: 90300 version_till: null """ conf_dir = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'available_parameters', ) yaml_files: List[str] = [] for root, _, files in os.walk(conf_dir): for file in sorted(files): full_path = os.path.join(root, file) if file.lower().endswith(('.yml', '.yaml')): yaml_files.append(full_path) else: logger.info('Ignored a non-YAML file found under `available_parameters` directory: `%s`.', full_path) for file in yaml_files: try: config: Dict[str, Any] = _read_postgres_gucs_validators_file(file) except InvalidGucValidatorsFile as exc: logger.warning(str(exc)) continue logger.debug(f'Parsing validators from file `{file}`.') mapping = { 'parameters': parameters, 'recovery_parameters': recovery_parameters, } for section in ['parameters', 'recovery_parameters']: section_var = mapping[section] config_section = config.get(section, {}) for parameter in config_section.keys(): section_var[parameter] = _get_postgres_guc_validators(config_section, parameter) _load_postgres_gucs_validators() def _transform_parameter_value(validators: MutableMapping[str, Tuple[_Transformable, ...]], version: int, name: str, value: Any, available_gucs: 
CaseInsensitiveSet) -> Optional[Any]: """Validate *value* of GUC *name* for Postgres *version* using defined *validators* and *available_gucs*. :param validators: a dictionary of all GUCs across all Postgres versions. Each key is the name of a Postgres GUC, and the corresponding value is a variable length tuple of :class:`_Transformable`. Each item is a validation rule for the GUC for a given range of Postgres versions. Should either contain recovery GUCs or general GUCs, not both. :param version: Postgres version to validate the GUC against. :param name: name of the Postgres GUC. :param value: value of the Postgres GUC. :param available_gucs: a set of all GUCs available in Postgres *version*. Each item is the name of a Postgres GUC. Used for a couple purposes: * Disallow writing GUCs to ``postgresql.conf`` (or ``recovery.conf``) that does not exist in Postgres *version*; * Avoid ignoring GUC *name* if it does not have a validator in *validators*, but is a valid GUC in Postgres *version*. :returns: the return value may be one among: * *value* transformed to the expected format for GUC *name* in Postgres *version*, if *name* is present in *available_gucs* and has a validator in *validators* for the corresponding Postgres *version*; or * The own *value* if *name* is present in *available_gucs* but not in *validators*; or * ``None`` if *name* is not present in *available_gucs*. """ if name in available_gucs: for validator in validators.get(name, ()) or (): if version >= validator.version_from and\ (validator.version_till is None or version < validator.version_till): return validator.transform(name, value) # Ideally we should have a validator in *validators*. However, if none is available, we will not discard a # setting that exists in Postgres *version*, but rather allow the value with no validation. 
return value logger.warning('Removing unexpected parameter=%s value=%s from the config', name, value) def transform_postgresql_parameter_value(version: int, name: str, value: Any, available_gucs: CaseInsensitiveSet) -> Optional[Any]: """Validate *value* of GUC *name* for Postgres *version* using ``parameters`` and *available_gucs*. :param version: Postgres version to validate the GUC against. :param name: name of the Postgres GUC. :param value: value of the Postgres GUC. :param available_gucs: a set of all GUCs available in Postgres *version*. Each item is the name of a Postgres GUC. Used for a couple purposes: * Disallow writing GUCs to ``postgresql.conf`` that does not exist in Postgres *version*; * Avoid ignoring GUC *name* if it does not have a validator in ``parameters``, but is a valid GUC in Postgres *version*. :returns: The return value may be one among: * The original *value* if *name* seems to be an extension GUC (contains a period '.'); or * ``None`` if **name** is a recovery GUC; or * *value* transformed to the expected format for GUC *name* in Postgres *version* using validators defined in ``parameters``. Can also return ``None``. See :func:`_transform_parameter_value`. """ if '.' in name and name not in parameters: # likely an extension GUC, so just return as it is. Otherwise, if `name` is in `parameters`, it's likely a # namespaced GUC from a custom Postgres build, so we treat that over the usual validation means. return value if name in recovery_parameters: return None return _transform_parameter_value(parameters, version, name, value, available_gucs) def transform_recovery_parameter_value(version: int, name: str, value: Any, available_gucs: CaseInsensitiveSet) -> Optional[Any]: """Validate *value* of GUC *name* for Postgres *version* using ``recovery_parameters`` and *available_gucs*. :param version: Postgres version to validate the recovery GUC against. :param name: name of the Postgres recovery GUC. 
:param value: value of the Postgres recovery GUC. :param available_gucs: a set of all GUCs available in Postgres *version*. Each item is the name of a Postgres GUC. Used for a couple purposes: * Disallow writing GUCs to ``recovery.conf`` (or ``postgresql.conf`` depending on *version*), that does not exist in Postgres *version*; * Avoid ignoring recovery GUC *name* if it does not have a validator in ``recovery_parameters``, but is a valid GUC in Postgres *version*. :returns: *value* transformed to the expected format for recovery GUC *name* in Postgres *version* using validators defined in ``recovery_parameters``. It can also return ``None``. See :func:`_transform_parameter_value`. """ # Recovery settings are not present in ``postgres --describe-config`` output of Postgres <= 11. In that case we # just pass down the list of settings defined in Patroni validators so :func:`_transform_parameter_value` will not # discard the recovery GUCs when running Postgres <= 11. # NOTE: At the moment this change was done Postgres 11 was almost EOL, and had been likely extensively used with # Patroni, so we should be able to rely solely on Patroni validators as the source of truth. return _transform_parameter_value( recovery_parameters, version, name, value, available_gucs if version >= 120000 else CaseInsensitiveSet(recovery_parameters.keys())) patroni-3.2.2/patroni/psycopg.py000066400000000000000000000115031455170150700167110ustar00rootroot00000000000000"""Abstraction layer for :mod:`psycopg` module. This module is able to handle both :mod:`pyscopg2` and :mod:`psycopg`, and it exposes a common interface for both. :mod:`psycopg2` takes precedence. :mod:`psycopg` will only be used if :mod:`psycopg2` is either absent or older than ``2.5.4``. 
""" from typing import Any, Optional, TYPE_CHECKING, Union if TYPE_CHECKING: # pragma: no cover from psycopg import Connection from psycopg2 import connection, cursor __all__ = ['connect', 'quote_ident', 'quote_literal', 'DatabaseError', 'Error', 'OperationalError', 'ProgrammingError'] _legacy = False try: from psycopg2 import __version__ from . import MIN_PSYCOPG2, parse_version if parse_version(__version__) < MIN_PSYCOPG2: raise ImportError from psycopg2 import connect as _connect, Error, DatabaseError, OperationalError, ProgrammingError from psycopg2.extensions import adapt try: from psycopg2.extensions import quote_ident as _quote_ident except ImportError: _legacy = True def quote_literal(value: Any, conn: Optional[Any] = None) -> str: """Quote *value* as a SQL literal. .. note:: *value* is quoted through :mod:`psycopg2` adapters. :param value: value to be quoted. :param conn: if a connection is given then :func:`quote_literal` checks if any special handling based on server parameters needs to be applied to *value* before quoting it as a SQL literal. :returns: *value* quoted as a SQL literal. """ value = adapt(value) if conn: value.prepare(conn) return value.getquoted().decode('utf-8') except ImportError: from psycopg import connect as __connect, sql, Error, DatabaseError, OperationalError, ProgrammingError def _connect(dsn: Optional[str] = None, **kwargs: Any) -> 'Connection[Any]': """Call :func:`psycopg.connect` with *dsn* and ``**kwargs``. .. note:: Will create ``server_version`` attribute in the returning connection, so it keeps compatibility with the object that would be returned by :func:`psycopg2.connect`. :param dsn: DSN to call :func:`psycopg.connect` with. :param kwargs: keyword arguments to call :func:`psycopg.connect` with. :returns: a connection to the database. 
""" ret = __connect(dsn or "", **kwargs) setattr(ret, 'server_version', ret.pgconn.server_version) # compatibility with psycopg2 return ret def _quote_ident(value: Any, scope: Any) -> str: """Quote *value* as a SQL identifier. :param value: value to be quoted. :param scope: connection to evaluate the returning string into. :returns: *value* quoted as a SQL identifier. """ return sql.Identifier(value).as_string(scope) def quote_literal(value: Any, conn: Optional[Any] = None) -> str: """Quote *value* as a SQL literal. :param value: value to be quoted. :param conn: connection to evaluate the returning string into. :returns: *value* quoted as a SQL literal. """ return sql.Literal(value).as_string(conn) def connect(*args: Any, **kwargs: Any) -> Union['connection', 'Connection[Any]']: """Get a connection to the database. .. note:: The connection will have ``autocommit`` enabled. It also enforces ``search_path=pg_catalog`` for non-replication connections to mitigate security issues as Patroni relies on superuser connections. :param args: positional arguments to call :func:`~psycopg.connect` function from :mod:`psycopg` module. :param kwargs: keyword arguments to call :func:`~psycopg.connect` function from :mod:`psycopg` module. :returns: a connection to the database. Can be either a :class:`psycopg.Connection` if using :mod:`psycopg`, or a :class:`psycopg2.extensions.connection` if using :mod:`psycopg2`. """ if kwargs and 'replication' not in kwargs and kwargs.get('fallback_application_name') != 'Patroni ctl': options = [kwargs['options']] if 'options' in kwargs else [] options.append('-c search_path=pg_catalog') kwargs['options'] = ' '.join(options) ret = _connect(*args, **kwargs) ret.autocommit = True return ret def quote_ident(value: Any, conn: Optional[Union['cursor', 'connection', 'Connection[Any]']] = None) -> str: """Quote *value* as a SQL identifier. :param value: value to be quoted. :param conn: connection to evaluate the returning string into. 
Can be either a :class:`psycopg.Connection` if using :mod:`psycopg`, or a :class:`psycopg2.extensions.connection` if using :mod:`psycopg2`. :returns: *value* quoted as a SQL identifier. """ if _legacy or conn is None: return '"{0}"'.format(value.replace('"', '""')) return _quote_ident(value, conn) patroni-3.2.2/patroni/raft_controller.py000066400000000000000000000015621455170150700204300ustar00rootroot00000000000000import logging from .config import Config from .daemon import AbstractPatroniDaemon, abstract_main, get_base_arg_parser from .dcs.raft import KVStoreTTL logger = logging.getLogger(__name__) class RaftController(AbstractPatroniDaemon): def __init__(self, config: Config) -> None: super(RaftController, self).__init__(config) kvstore_config = self.config.get('raft') assert 'self_addr' in kvstore_config self._raft = KVStoreTTL(None, None, None, **kvstore_config) def _run_cycle(self) -> None: try: self._raft.doTick(self._raft.conf.autoTickPeriod) except Exception: logger.exception('doTick') def _shutdown(self) -> None: self._raft.destroy() def main() -> None: parser = get_base_arg_parser() args = parser.parse_args() abstract_main(RaftController, args.configfile) patroni-3.2.2/patroni/request.py000066400000000000000000000200451455170150700167160ustar00rootroot00000000000000"""Facilities for handling communication with Patroni's REST API.""" import json import urllib3 from typing import Any, Dict, Optional, Union from urllib.parse import urlparse, urlunparse from .config import Config from .dcs import Member from .utils import USER_AGENT class HTTPSConnectionPool(urllib3.HTTPSConnectionPool): def _validate_conn(self, *args: Any, **kwargs: Any) -> None: """Override parent method to silence warnings about requests without certificate verification enabled.""" class PatroniPoolManager(urllib3.PoolManager): def __init__(self, *args: Any, **kwargs: Any) -> None: super(PatroniPoolManager, self).__init__(*args, **kwargs) self.pool_classes_by_scheme = {'http': 
urllib3.HTTPConnectionPool, 'https': HTTPSConnectionPool} class PatroniRequest(object): """Wrapper for performing requests to Patroni's REST API. Prepares the request manager with the configured settings before performing the request. """ def __init__(self, config: Union[Config, Dict[str, Any]], insecure: Optional[bool] = None) -> None: """Create a new :class:`PatroniRequest` instance with given *config*. :param config: Patroni YAML configuration. :param insecure: how to deal with SSL certs verification: * If ``True`` it will perform REST API requests without verifying SSL certs; or * If ``False`` it will perform REST API requests and verify SSL certs; or * If ``None`` it will behave according to the value of ``ctl.insecure`` configuration; or * If none of the above applies, then it falls back to ``False``. """ self._insecure = insecure self._pool = PatroniPoolManager(num_pools=10, maxsize=10) self.reload_config(config) @staticmethod def _get_ctl_value(config: Union[Config, Dict[str, Any]], name: str, default: Any = None) -> Optional[Any]: """Get value of *name* setting from the ``ctl`` section of the *config*. :param config: Patroni YAML configuration. :param name: name of the setting value to be retrieved. :returns: value of ``ctl.*name*`` if present, ``None`` otherwise. """ return config.get('ctl', {}).get(name, default) @staticmethod def _get_restapi_value(config: Union[Config, Dict[str, Any]], name: str) -> Optional[Any]: """Get value of *name* setting from the ``restapi`` section of the *config*. :param config: Patroni YAML configuration. :param name: name of the setting value to be retrieved. :returns: value of ``restapi -> *name*`` if present, ``None`` otherwise. """ return config.get('restapi', {}).get(name) def _apply_pool_param(self, param: str, value: Any) -> None: """Configure *param* as *value* in the request manager. :param param: name of the setting to be changed. :param value: new value for *param*. 
If ``None``, ``0``, ``False``, and similar values, then explicit *param* declaration is removed, in which case it takes its default value, if any. """ if value: self._pool.connection_pool_kw[param] = value else: self._pool.connection_pool_kw.pop(param, None) def _apply_ssl_file_param(self, config: Union[Config, Dict[str, Any]], name: str) -> Union[str, None]: """Apply a given SSL related param to the request manager. :param config: Patroni YAML configuration. :param name: prefix of the Patroni SSL related setting name. Currently, supports these: * ``cert``: gets translated to ``certfile`` * ``key``: gets translated to ``keyfile`` Will attempt to fetch the requested key first from ``ctl`` section. :returns: value of ``ctl.*name*file`` if present, ``None`` otherwise. """ value = self._get_ctl_value(config, name + 'file') self._apply_pool_param(name + '_file', value) return value def reload_config(self, config: Union[Config, Dict[str, Any]]) -> None: """Apply *config* to request manager. Configure these HTTP headers for requests: * ``authorization``: based on Patroni' CTL or REST API authentication config; * ``user-agent``: based on ``patroni.utils.USER_AGENT``. Also configure SSL related settings for requests: * ``ca_certs`` is configured if ``ctl.cacert`` or ``restapi.cafile`` is available; * ``cert``, ``key`` and ``key_password`` are configured if ``ctl.certfile`` is available. :param config: Patroni YAML configuration. """ # ``ctl -> auth`` is equivalent to ``ctl -> authentication -> username`` + ``:`` + # ``ctl -> authentication -> password``. 
And the same for ``restapi -> auth`` basic_auth = self._get_ctl_value(config, 'auth') or self._get_restapi_value(config, 'auth') self._pool.headers = urllib3.make_headers(basic_auth=basic_auth, user_agent=USER_AGENT) self._pool.connection_pool_kw['cert_reqs'] = 'CERT_REQUIRED' insecure = self._insecure if isinstance(self._insecure, bool)\ else self._get_ctl_value(config, 'insecure', False) if self._apply_ssl_file_param(config, 'cert'): if insecure: # The assert_hostname = False helps to silence warnings self._pool.connection_pool_kw['assert_hostname'] = False self._apply_ssl_file_param(config, 'key') password = self._get_ctl_value(config, 'keyfile_password') self._apply_pool_param('key_password', password) else: if insecure: # Disable server certificate validation if requested self._pool.connection_pool_kw['cert_reqs'] = 'CERT_NONE' self._pool.connection_pool_kw.pop('assert_hostname', None) self._pool.connection_pool_kw.pop('key_file', None) cacert = self._get_ctl_value(config, 'cacert') or self._get_restapi_value(config, 'cafile') self._apply_pool_param('ca_certs', cacert) def request(self, method: str, url: str, body: Optional[Any] = None, **kwargs: Any) -> urllib3.response.HTTPResponse: """Perform an HTTP request. :param method: the HTTP method to be used, e.g. ``GET``. :param url: the URL to be requested. :param body: anything to be used as the request body. :param kwargs: keyword arguments to be passed to :func:`urllib3.PoolManager.request`. :returns: the response returned upon request. """ if body is not None and not isinstance(body, str): body = json.dumps(body) return self._pool.request(method.upper(), url, body=body, **kwargs) def __call__(self, member: Member, method: str = 'GET', endpoint: Optional[str] = None, data: Optional[Any] = None, **kwargs: Any) -> urllib3.response.HTTPResponse: """Turn :class:`PatroniRequest` into a callable object. When called, perform a request through the manager. 
:param member: DCS member so we can fetch from it the configured base URL for the REST API. :param method: HTTP method to be used, e.g. ``GET``. :param endpoint: URL path of this request, e.g. ``switchover``. :param data: anything to be used as the request body. :returns: the response returned upon request. """ url = member.api_url or '' if endpoint: scheme, netloc, _, _, _, _ = urlparse(url) url = urlunparse((scheme, netloc, endpoint, '', '', '')) return self.request(method, url, data, **kwargs) def get(url: str, verify: bool = True, **kwargs: Any) -> urllib3.response.HTTPResponse: """Perform an HTTP GET request. .. note:: It uses :class:`PatroniRequest` so all relevant configuration is applied before processing the request. :param url: full URL for this GET request. :param verify: if it should verify SSL certificates when processing the request. :returns: the response returned from the request. """ http = PatroniRequest({}, not verify) return http.request('GET', url, **kwargs) patroni-3.2.2/patroni/scripts/000077500000000000000000000000001455170150700163425ustar00rootroot00000000000000patroni-3.2.2/patroni/scripts/__init__.py000066400000000000000000000000001455170150700204410ustar00rootroot00000000000000patroni-3.2.2/patroni/scripts/aws.py000077500000000000000000000062421455170150700175150ustar00rootroot00000000000000#!/usr/bin/env python import json import logging import sys import boto3 from botocore.exceptions import ClientError from botocore.utils import IMDSFetcher from typing import Any, Optional from ..utils import Retry, RetryFailedError logger = logging.getLogger(__name__) class AWSConnection(object): def __init__(self, cluster_name: Optional[str]) -> None: self.available = False self.cluster_name = cluster_name if cluster_name is not None else 'unknown' self._retry = Retry(deadline=300, max_delay=30, max_tries=-1, retry_exceptions=ClientError) try: # get the instance id fetcher = IMDSFetcher(timeout=2.1) token = fetcher._fetch_metadata_token() r = 
fetcher._get_request("/latest/dynamic/instance-identity/document", None, token) except Exception: logger.error('cannot query AWS meta-data') return if r.status_code < 400: try: content = json.loads(r.text) self.instance_id = content['instanceId'] self.region = content['region'] except Exception: logger.exception('unable to fetch instance id and region from AWS meta-data') return self.available = True def retry(self, *args: Any, **kwargs: Any) -> Any: return self._retry.copy()(*args, **kwargs) def aws_available(self) -> bool: return self.available def _tag_ebs(self, conn: Any, role: str) -> None: """ set tags, carrying the cluster name, instance role and instance id for the EBS storage """ tags = [{'Key': 'Name', 'Value': 'spilo_' + self.cluster_name}, {'Key': 'Role', 'Value': role}, {'Key': 'Instance', 'Value': self.instance_id}] volumes = conn.volumes.filter(Filters=[{'Name': 'attachment.instance-id', 'Values': [self.instance_id]}]) conn.create_tags(Resources=[v.id for v in volumes], Tags=tags) def _tag_ec2(self, conn: Any, role: str) -> None: """ tag the current EC2 instance with a cluster role """ tags = [{'Key': 'Role', 'Value': role}] conn.create_tags(Resources=[self.instance_id], Tags=tags) def on_role_change(self, new_role: str) -> bool: if not self.available: return False try: conn = boto3.resource('ec2', region_name=self.region) # type: ignore self.retry(self._tag_ec2, conn, new_role) self.retry(self._tag_ebs, conn, new_role) except RetryFailedError: logger.warning("Unable to communicate to AWS " "when setting tags for the EC2 instance {0} " "and attached EBS volumes".format(self.instance_id)) return False return True def main(): logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO) if len(sys.argv) == 4 and sys.argv[1] in ('on_start', 'on_stop', 'on_role_change'): AWSConnection(cluster_name=sys.argv[3]).on_role_change(sys.argv[2]) else: sys.exit("Usage: {0} action role name".format(sys.argv[0])) if __name__ == 
'__main__': main() patroni-3.2.2/patroni/scripts/wale_restore.py000077500000000000000000000347171455170150700214260ustar00rootroot00000000000000#!/usr/bin/env python # sample script to clone new replicas using WAL-E restore # falls back to pg_basebackup if WAL-E restore fails, or if # WAL-E backup is too far behind # note that pg_basebackup still expects to use restore from # WAL-E for transaction logs # theoretically should work with SWIFT, but not tested on it # arguments are: # - cluster scope # - cluster role # - leader connection string # - number of retries # - envdir for the WALE env # - WALE_BACKUP_THRESHOLD_MEGABYTES if WAL amount is above that - use pg_basebackup # - WALE_BACKUP_THRESHOLD_PERCENTAGE if WAL size exceeds a certain percentage of the # this script depends on an envdir defining the S3 bucket (or SWIFT dir),and login # credentials per WALE Documentation. # currently also requires that you configure the restore_command to use wal_e, example: # recovery_conf: # restore_command: envdir /etc/wal-e.d/env wal-e wal-fetch "%f" "%p" -p 1 import argparse import csv import logging import os import subprocess import sys import time from enum import IntEnum from typing import Any, List, NamedTuple, Optional, Tuple, TYPE_CHECKING from .. 
import psycopg logger = logging.getLogger(__name__) RETRY_SLEEP_INTERVAL = 1 si_prefixes = ['K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'] # Meaningful names to the exit codes used by WALERestore class ExitCode(IntEnum): SUCCESS = 0 #: Succeeded RETRY_LATER = 1 #: External issue, retry later FAIL = 2 #: Don't try again unless configuration changes # We need to know the current PG version in order to figure out the correct WAL directory name def get_major_version(data_dir: str) -> float: version_file = os.path.join(data_dir, 'PG_VERSION') if os.path.isfile(version_file): # version file exists try: with open(version_file) as f: return float(f.read()) except Exception: logger.exception('Failed to read PG_VERSION from %s', data_dir) return 0.0 def repr_size(n_bytes: float) -> str: """ >>> repr_size(1000) '1000 Bytes' >>> repr_size(8257332324597) '7.5 TiB' """ if n_bytes < 1024: return '{0} Bytes'.format(n_bytes) i = -1 while n_bytes > 1023: n_bytes /= 1024.0 i += 1 return '{0} {1}iB'.format(round(n_bytes, 1), si_prefixes[i]) def size_as_bytes(size: float, prefix: str) -> int: """ >>> size_as_bytes(7.5, 'T') 8246337208320 """ prefix = prefix.upper() assert prefix in si_prefixes exponent = si_prefixes.index(prefix) + 1 return int(size * (1024.0 ** exponent)) class WALEConfig(NamedTuple): env_dir: str threshold_mb: int threshold_pct: int cmd: List[str] class WALERestore(object): def __init__(self, scope: str, datadir: str, connstring: str, env_dir: str, threshold_mb: int, threshold_pct: int, use_iam: int, no_leader: bool, retries: int) -> None: self.scope = scope self.leader_connection = connstring self.data_dir = datadir self.no_leader = no_leader wale_cmd = [ 'envdir', env_dir, 'wal-e', ] if use_iam == 1: wale_cmd += ['--aws-instance-profile'] self.wal_e = WALEConfig( env_dir=env_dir, threshold_mb=threshold_mb, threshold_pct=threshold_pct, cmd=wale_cmd, ) self.init_error = (not os.path.exists(self.wal_e.env_dir)) self.retries = retries def run(self) -> int: """ Creates a new 
replica using WAL-E Returns ------- ExitCode 0 = Success 1 = Error, try again 2 = Error, don't try again """ if self.init_error: logger.error('init error: %r did not exist at initialization time', self.wal_e.env_dir) return ExitCode.FAIL try: should_use_s3 = self.should_use_s3_to_create_replica() if should_use_s3 is None: # Need to retry return ExitCode.RETRY_LATER elif should_use_s3: return self.create_replica_with_s3() elif not should_use_s3: return ExitCode.FAIL except Exception: logger.exception("Unhandled exception when running WAL-E restore") return ExitCode.FAIL def should_use_s3_to_create_replica(self) -> Optional[bool]: """ determine whether it makes sense to use S3 and not pg_basebackup """ threshold_megabytes = self.wal_e.threshold_mb threshold_percent = self.wal_e.threshold_pct try: cmd = self.wal_e.cmd + ['backup-list', '--detail', 'LATEST'] logger.debug('calling %r', cmd) wale_output = subprocess.check_output(cmd) reader = csv.DictReader(wale_output.decode('utf-8').splitlines(), dialect='excel-tab') rows = list(reader) if not len(rows): logger.warning('wal-e did not find any backups') return False # This check might not add much, it was performed in the previous # version of this code. since the old version rolled CSV parsing the # check may have been part of the CSV parsing. if len(rows) > 1: logger.warning( 'wal-e returned more than one row of backups: %r', rows) return False backup_info = rows[0] except subprocess.CalledProcessError: logger.exception("could not query wal-e latest backup") return None try: backup_size = int(backup_info['expanded_size_bytes']) backup_start_segment = backup_info['wal_segment_backup_start'] backup_start_offset = backup_info['wal_segment_offset_backup_start'] except KeyError: logger.exception("unable to get some of WALE backup parameters") return None # WAL filename is XXXXXXXXYYYYYYYY000000ZZ, where X - timeline, Y - LSN logical log file, # ZZ - 2 high digits of LSN offset. 
The rest of the offset is the provided decimal offset, # that we have to convert to hex and 'prepend' to the high offset digits. lsn_segment = backup_start_segment[8:16] # first 2 characters of the result are 0x and the last one is L lsn_offset = hex((int(backup_start_segment[16:32], 16) << 24) + int(backup_start_offset))[2:-1] # construct the LSN from the segment and offset backup_start_lsn = '{0}/{1}'.format(lsn_segment, lsn_offset) diff_in_bytes = backup_size attempts_no = 0 while True: if self.leader_connection: con = None try: # get the difference in bytes between the current WAL location and the backup start offset con = psycopg.connect(self.leader_connection) if getattr(con, 'server_version', 0) >= 100000: wal_name = 'wal' lsn_name = 'lsn' else: wal_name = 'xlog' lsn_name = 'location' with con.cursor() as cur: cur.execute(("SELECT CASE WHEN pg_catalog.pg_is_in_recovery()" " THEN GREATEST(pg_catalog.pg_{0}_{1}_diff(COALESCE(" "pg_last_{0}_receive_{1}(), '0/0'), %s)::bigint, " "pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_last_{0}_replay_{1}(), %s)::bigint)" " ELSE pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_current_{0}_{1}(), %s)::bigint" " END").format(wal_name, lsn_name), (backup_start_lsn, backup_start_lsn, backup_start_lsn)) for row in cur: diff_in_bytes = int(row[0]) break except psycopg.Error: logger.exception('could not determine difference with the leader location') if attempts_no < self.retries: # retry in case of a temporarily connection issue attempts_no = attempts_no + 1 time.sleep(RETRY_SLEEP_INTERVAL) continue else: if not self.no_leader: return False # do no more retries on the outer level logger.info("continue with base backup from S3 since leader is not available") diff_in_bytes = 0 break finally: if con: con.close() else: # always try to use WAL-E if leader connection string is not available diff_in_bytes = 0 break # if the size of the accumulated WAL segments is more than a certain percentage of the backup size # or exceeds the pre-determined size 
- pg_basebackup is chosen instead. is_size_thresh_ok = diff_in_bytes < int(threshold_megabytes) * 1048576 threshold_pct_bytes = backup_size * threshold_percent / 100.0 is_percentage_thresh_ok = float(diff_in_bytes) < int(threshold_pct_bytes) are_thresholds_ok = is_size_thresh_ok and is_percentage_thresh_ok class Size(object): def __init__(self, n_bytes: float, prefix: Optional[str] = None) -> None: self.n_bytes = n_bytes self.prefix = prefix def __repr__(self) -> str: if self.prefix is not None: n_bytes = size_as_bytes(self.n_bytes, self.prefix) else: n_bytes = self.n_bytes return repr_size(n_bytes) class HumanContext(object): def __init__(self, items: List[Tuple[str, Any]]) -> None: self.items = items def __repr__(self) -> str: return ', '.join('{}={!r}'.format(key, value) for key, value in self.items) human_context = repr(HumanContext([ ('threshold_size', Size(threshold_megabytes, 'M')), ('threshold_percent', threshold_percent), ('threshold_percent_size', Size(threshold_pct_bytes)), ('backup_size', Size(backup_size)), ('backup_diff', Size(diff_in_bytes)), ('is_size_thresh_ok', is_size_thresh_ok), ('is_percentage_thresh_ok', is_percentage_thresh_ok), ])) if not are_thresholds_ok: logger.info('wal-e backup size diff is over threshold, falling back ' 'to other means of restore: %s', human_context) else: logger.info('Thresholds are OK, using wal-e basebackup: %s', human_context) return are_thresholds_ok def fix_subdirectory_path_if_broken(self, dirname: str) -> bool: # in case it is a symlink pointing to a non-existing location, remove it and create the actual directory path = os.path.join(self.data_dir, dirname) if not os.path.exists(path): if os.path.islink(path): # broken xlog symlink, to remove try: os.remove(path) except OSError: logger.exception("could not remove broken %s symlink pointing to %s", dirname, os.readlink(path)) return False try: os.mkdir(path) except OSError: logger.exception("could not create missing %s directory path", dirname) return False 
return True def create_replica_with_s3(self) -> int: # if we're set up, restore the replica using fetch latest try: cmd = self.wal_e.cmd + ['backup-fetch', '{}'.format(self.data_dir), 'LATEST'] logger.debug('calling: %r', cmd) exit_code = subprocess.call(cmd) except Exception as e: logger.error('Error when fetching backup with WAL-E: {0}'.format(e)) return ExitCode.RETRY_LATER if (exit_code == 0 and not self.fix_subdirectory_path_if_broken('pg_xlog' if get_major_version(self.data_dir) < 10 else 'pg_wal')): return ExitCode.FAIL return exit_code def main() -> int: logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO) parser = argparse.ArgumentParser(description='Script to image replicas using WAL-E') parser.add_argument('--scope', required=True) parser.add_argument('--role', required=False) parser.add_argument('--datadir', required=True) parser.add_argument('--connstring', required=True) parser.add_argument('--retries', type=int, default=1) parser.add_argument('--envdir', required=True) parser.add_argument('--threshold_megabytes', type=int, default=10240) parser.add_argument('--threshold_backup_size_percentage', type=int, default=30) parser.add_argument('--use_iam', type=int, default=0) parser.add_argument('--no_leader', '--no_master', type=int, default=0) args = parser.parse_args() exit_code = None assert args.retries >= 0 # Retry cloning in a loop. We do separate retries for the leader # connection attempt inside should_use_s3_to_create_replica, # because we need to differentiate between the last attempt and # the rest and make a decision when the last attempt fails on # whether to use WAL-E or not depending on the no_leader flag. 
for _ in range(0, args.retries + 1): restore = WALERestore(scope=args.scope, datadir=args.datadir, connstring=args.connstring, env_dir=args.envdir, threshold_mb=args.threshold_megabytes, threshold_pct=args.threshold_backup_size_percentage, use_iam=args.use_iam, no_leader=args.no_leader, retries=args.retries) exit_code = restore.run() if exit_code != ExitCode.RETRY_LATER: # only WAL-E failures lead to the retry logger.debug('exit_code is %r, not retrying', exit_code) break time.sleep(RETRY_SLEEP_INTERVAL) if TYPE_CHECKING: # pragma: no cover assert exit_code is not None return exit_code if __name__ == '__main__': sys.exit(main()) patroni-3.2.2/patroni/tags.py000066400000000000000000000071421455170150700161670ustar00rootroot00000000000000"""Tags handling.""" import abc from typing import Any, Dict, Optional from patroni.utils import parse_int class Tags(abc.ABC): """An abstract class that encapsulates all the ``tags`` logic. Child classes that want to use provided facilities must implement ``tags`` abstract property. """ @staticmethod def _filter_tags(tags: Dict[str, Any]) -> Dict[str, Any]: """Get tags configured for this node, if any. Handle both predefined Patroni tags and custom defined tags. .. note:: A custom tag is any tag added to the configuration ``tags`` section that is not one of ``clonefrom``, ``nofailover``, ``noloadbalance`` or ``nosync``. For most of the Patroni predefined tags, the returning object will only contain them if they are enabled as they all are boolean values that default to disabled. However ``nofailover`` tag is always returned if ``failover_priority`` tag is defined. In this case, we need both values to see if they are contradictory and the ``nofailover`` value should be used. :returns: a dictionary of tags set for this node. The key is the tag name, and the value is the corresponding tag value. 
""" return {tag: value for tag, value in tags.items() if any((tag not in ('clonefrom', 'nofailover', 'noloadbalance', 'nosync'), value, tag == 'nofailover' and 'failover_priority' in tags))} @property @abc.abstractmethod def tags(self) -> Dict[str, Any]: """Configured tags. Must be implemented in a child class. """ raise NotImplementedError # pragma: no cover @property def clonefrom(self) -> bool: """``True`` if ``clonefrom`` tag is ``True``, else ``False``.""" return self.tags.get('clonefrom', False) @property def nofailover(self) -> bool: """Common logic for obtaining the value of ``nofailover`` from ``tags`` if defined. If ``nofailover`` is not defined, this methods returns ``True`` if ``failover_priority`` is non-positive, ``False`` otherwise. """ from_tags = self.tags.get('nofailover') if from_tags is not None: # Value of `nofailover` takes precedence over `failover_priority` return bool(from_tags) failover_priority = parse_int(self.tags.get('failover_priority')) return failover_priority is not None and failover_priority <= 0 @property def failover_priority(self) -> int: """Common logic for obtaining the value of ``failover_priority`` from ``tags`` if defined. If ``nofailover`` is defined as ``True``, this will return ``0``. Otherwise, it will return the value of ``failover_priority``, defaulting to ``1`` if it's not defined or invalid. 
""" from_tags = self.tags.get('nofailover') failover_priority = parse_int(self.tags.get('failover_priority')) failover_priority = 1 if failover_priority is None else failover_priority return 0 if from_tags else failover_priority @property def noloadbalance(self) -> bool: """``True`` if ``noloadbalance`` is ``True``, else ``False``.""" return bool(self.tags.get('noloadbalance', False)) @property def nosync(self) -> bool: """``True`` if ``nosync`` is ``True``, else ``False``.""" return bool(self.tags.get('nosync', False)) @property def replicatefrom(self) -> Optional[str]: """Value of ``replicatefrom`` tag, if any.""" return self.tags.get('replicatefrom') patroni-3.2.2/patroni/utils.py000066400000000000000000001175141455170150700163760ustar00rootroot00000000000000"""Utilitary objects and functions that can be used throughout Patroni code. :var tzutc: UTC time zone info object. :var logger: logger of this module. :var USER_AGENT: identifies the Patroni version, Python version, and the underlying platform. :var OCT_RE: regular expression to match octal numbers, signed or unsigned. :var DEC_RE: regular expression to match decimal numbers, signed or unsigned. :var HEX_RE: regular expression to match hex strings, signed or unsigned. :var DBL_RE: regular expression to match double precision numbers, signed or unsigned. Matches scientific notation too. 
:var WHITESPACE_RE: regular expression to match whitespace characters """ import errno import logging import os import platform import random import re import socket import subprocess import sys import tempfile import time from shlex import split from typing import Any, Callable, Dict, Iterator, List, Optional, Union, Tuple, Type, TYPE_CHECKING from dateutil import tz from json import JSONDecoder from urllib3.response import HTTPResponse from .exceptions import PatroniException from .version import __version__ if TYPE_CHECKING: # pragma: no cover from .dcs import Cluster from .config import GlobalConfig tzutc = tz.tzutc() logger = logging.getLogger(__name__) USER_AGENT = 'Patroni/{0} Python/{1} {2}'.format(__version__, platform.python_version(), platform.system()) OCT_RE = re.compile(r'^[-+]?0[0-7]*') DEC_RE = re.compile(r'^[-+]?(0|[1-9][0-9]*)') HEX_RE = re.compile(r'^[-+]?0x[0-9a-fA-F]+') DBL_RE = re.compile(r'^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?') WHITESPACE_RE = re.compile(r'[ \t\n\r]*', re.VERBOSE | re.MULTILINE | re.DOTALL) def deep_compare(obj1: Dict[Any, Union[Any, Dict[Any, Any]]], obj2: Dict[Any, Union[Any, Dict[Any, Any]]]) -> bool: """Recursively compare two dictionaries to check if they are equal in terms of keys and values. .. note:: Values are compared based on their string representation. :param obj1: dictionary to be compared with *obj2*. :param obj2: dictionary to be compared with *obj1*. :returns: ``True`` if all keys and values match between the two dictionaries. 
:Example: >>> deep_compare({'1': None}, {}) False >>> deep_compare({'1': {}}, {'1': None}) False >>> deep_compare({'1': [1]}, {'1': [2]}) False >>> deep_compare({'1': 2}, {'1': '2'}) True >>> deep_compare({'1': {'2': [3, 4]}}, {'1': {'2': [3, 4]}}) True """ if set(list(obj1.keys())) != set(list(obj2.keys())): # Objects have different sets of keys return False for key, value in obj1.items(): if isinstance(value, dict): if not (isinstance(obj2[key], dict) and deep_compare(value, obj2[key])): return False elif str(value) != str(obj2[key]): return False return True def patch_config(config: Dict[Any, Union[Any, Dict[Any, Any]]], data: Dict[Any, Union[Any, Dict[Any, Any]]]) -> bool: """Update and append to dictionary *config* from overrides in *data*. .. note:: * If the value of a given key in *data* is ``None``, then the key is removed from *config*; * If a key is present in *data* but not in *config*, the key with the corresponding value is added to *config* * For keys that are present on both sides it will compare the string representation of the corresponding values, if the comparison doesn't match override the value :param config: configuration to be patched. :param data: new configuration values to patch *config* with. :returns: ``True`` if *config* was changed. """ is_changed = False for name, value in data.items(): if value is None: if config.pop(name, None) is not None: is_changed = True elif name in config: if isinstance(value, dict): if isinstance(config[name], dict): if patch_config(config[name], value): is_changed = True else: config[name] = value is_changed = True elif str(config[name]) != str(value): config[name] = value is_changed = True else: config[name] = value is_changed = True return is_changed def parse_bool(value: Any) -> Union[bool, None]: """Parse a given value to a :class:`bool` object. .. note:: The parsing is case-insensitive, and takes into consideration these values: * ``on``, ``true``, ``yes``, and ``1`` as ``True``. 
* ``off``, ``false``, ``no``, and ``0`` as ``False``. :param value: value to be parsed to :class:`bool`. :returns: the parsed value. If not able to parse, returns ``None``. :Example: >>> parse_bool(1) True >>> parse_bool('off') False >>> parse_bool('foo') """ value = str(value).lower() if value in ('on', 'true', 'yes', '1'): return True if value in ('off', 'false', 'no', '0'): return False def strtol(value: Any, strict: Optional[bool] = True) -> Tuple[Union[int, None], str]: """Extract the long integer part from the beginning of a string that represents a configuration value. As most as possible close equivalent of ``strtol(3)`` C function (with base=0), which is used by postgres to parse parameter values. Takes into consideration numbers represented either as hex, octal or decimal formats. :param value: any value from which we want to extract a long integer. :param strict: dictates how the first item in the returning tuple is set when :func:`strtol` is not able to find a long integer in *value*. If *strict* is ``True``, then the first item will be ``None``, else it will be ``1``. :returns: the first item is the extracted long integer from *value*, and the second item is the remaining string of *value*. If not able to match a long integer in *value*, then the first item will be either ``None`` or ``1`` (depending on *strict* argument), and the second item will be the original *value*. 
:Example: >>> strtol(0) == (0, '') True >>> strtol(1) == (1, '') True >>> strtol(9) == (9, '') True >>> strtol(' +0x400MB') == (1024, 'MB') True >>> strtol(' -070d') == (-56, 'd') True >>> strtol(' d ') == (None, 'd') True >>> strtol(' 1 d ') == (1, ' d') True >>> strtol('9s', False) == (9, 's') True >>> strtol(' s ', False) == (1, 's') True """ value = str(value).strip() for regex, base in ((HEX_RE, 16), (OCT_RE, 8), (DEC_RE, 10)): match = regex.match(value) if match: end = match.end() return int(value[:end], base), value[end:] return (None if strict else 1), value def strtod(value: Any) -> Tuple[Union[float, None], str]: """Extract the double precision part from the beginning of a string that reprensents a configuration value. As most as possible close equivalent of ``strtod(3)`` C function, which is used by postgres to parse parameter values. :param value: any value from which we want to extract a double precision. :returns: the first item is the extracted double precision from *value*, and the second item is the remaining string of *value*. If not able to match a double precision in *value*, then the first item will be ``None``, and the second item will be the original *value*. :Example: >>> strtod(' A ') == (None, 'A') True >>> strtod('1 A ') == (1.0, ' A') True >>> strtod('1.5A') == (1.5, 'A') True >>> strtod('8.325e-10A B C') == (8.325e-10, 'A B C') True """ value = str(value).strip() match = DBL_RE.match(value) if match: end = match.end() return float(value[:end]), value[end:] return None, value def convert_to_base_unit(value: Union[int, float], unit: str, base_unit: Optional[str]) -> Union[int, float, None]: """Convert *value* as a *unit* of compute information or time to *base_unit*. :param value: value to be converted to the base unit. :param unit: unit of *value*. Accepts these units (case sensitive): * For space: ``B``, ``kB``, ``MB``, ``GB``, or ``TB``; * For time: ``d``, ``h``, ``min``, ``s``, ``ms``, or ``us``. 
:param base_unit: target unit in the conversion. May contain the target unit with an associated value, e.g ``512MB``. Accepts these units (case sensitive): * For space: ``B``, ``kB``, or ``MB``; * For time: ``ms``, ``s``, or ``min``. :returns: *value* in *unit* converted to *base_unit*. Returns ``None`` if *unit* or *base_unit* is invalid. :Example: >>> convert_to_base_unit(1, 'GB', '256MB') 4 >>> convert_to_base_unit(1, 'GB', 'MB') 1024 >>> convert_to_base_unit(1, 'gB', '512MB') is None True >>> convert_to_base_unit(1, 'GB', '512 MB') is None True """ convert: Dict[str, Dict[str, Union[int, float]]] = { 'B': {'B': 1, 'kB': 1024, 'MB': 1024 * 1024, 'GB': 1024 * 1024 * 1024, 'TB': 1024 * 1024 * 1024 * 1024}, 'kB': {'B': 1.0 / 1024, 'kB': 1, 'MB': 1024, 'GB': 1024 * 1024, 'TB': 1024 * 1024 * 1024}, 'MB': {'B': 1.0 / (1024 * 1024), 'kB': 1.0 / 1024, 'MB': 1, 'GB': 1024, 'TB': 1024 * 1024}, 'ms': {'us': 1.0 / 1000, 'ms': 1, 's': 1000, 'min': 1000 * 60, 'h': 1000 * 60 * 60, 'd': 1000 * 60 * 60 * 24}, 's': {'us': 1.0 / (1000 * 1000), 'ms': 1.0 / 1000, 's': 1, 'min': 60, 'h': 60 * 60, 'd': 60 * 60 * 24}, 'min': {'us': 1.0 / (1000 * 1000 * 60), 'ms': 1.0 / (1000 * 60), 's': 1.0 / 60, 'min': 1, 'h': 60, 'd': 60 * 24} } round_order = { 'TB': 'GB', 'GB': 'MB', 'MB': 'kB', 'kB': 'B', 'd': 'h', 'h': 'min', 'min': 's', 's': 'ms', 'ms': 'us' } if base_unit and base_unit not in convert: base_value, base_unit = strtol(base_unit, False) else: base_value = 1 if base_value is not None and base_unit in convert and unit in convert[base_unit]: value *= convert[base_unit][unit] / float(base_value) if unit in round_order: multiplier = convert[base_unit][round_order[unit]] value = round(value / float(multiplier)) * multiplier return value def parse_int(value: Any, base_unit: Optional[str] = None) -> Optional[int]: """Parse *value* as an :class:`int`. :param value: any value that can be handled either by :func:`strtol` or :func:`strtod`. 
If *value* contains a unit, then *base_unit* must be given. :param base_unit: an optional base unit to convert *value* through :func:`convert_to_base_unit`. Not used if *value* does not contain a unit. :returns: the parsed value, if able to parse. Otherwise returns ``None``. :Example: >>> parse_int('1') == 1 True >>> parse_int(' 0x400 MB ', '16384kB') == 64 True >>> parse_int('1MB', 'kB') == 1024 True >>> parse_int('1000 ms', 's') == 1 True >>> parse_int('1TB', 'GB') is None True >>> parse_int(50, None) == 50 True >>> parse_int("51", None) == 51 True >>> parse_int("nonsense", None) == None True >>> parse_int("nonsense", "kB") == None True >>> parse_int("nonsense") == None True >>> parse_int(0) == 0 True >>> parse_int('6GB', '16MB') == 384 True >>> parse_int('4097.4kB', 'kB') == 4097 True >>> parse_int('4097.5kB', 'kB') == 4098 True """ val, unit = strtol(value) if val is None and unit.startswith('.') or unit and unit[0] in ('.', 'e', 'E'): val, unit = strtod(value) if val is not None: unit = unit.strip() if not unit: return round(val) val = convert_to_base_unit(val, unit, base_unit) if val is not None: return round(val) def parse_real(value: Any, base_unit: Optional[str] = None) -> Optional[float]: """Parse *value* as a :class:`float`. :param value: any value that can be handled by :func:`strtod`. If *value* contains a unit, then *base_unit* must be given. :param base_unit: an optional base unit to convert *value* through :func:`convert_to_base_unit`. Not used if *value* does not contain a unit. :returns: the parsed value, if able to parse. Otherwise returns ``None``. 
:Example: >>> parse_real(' +0.0005 ') == 0.0005 True >>> parse_real('0.0005ms', 'ms') == 0.0 True >>> parse_real('0.00051ms', 'ms') == 0.001 True """ val, unit = strtod(value) if val is not None: unit = unit.strip() if not unit: return val return convert_to_base_unit(val, unit, base_unit) def compare_values(vartype: str, unit: Optional[str], settings_value: Any, config_value: Any) -> bool: """Check if the value from ``pg_settings`` and from Patroni config are equivalent after parsing them as *vartype*. :param vartype: the target type to parse *settings_value* and *config_value* before comparing them. Accepts any among of the following (case sensitive): * ``bool``: parse values using :func:`parse_bool`; or * ``integer``: parse values using :func:`parse_int`; or * ``real``: parse values using :func:`parse_real`; or * ``enum``: parse values as lowercase strings; or * ``string``: parse values as strings. This one is used by default if no valid value is passed as *vartype*. :param unit: base unit to be used as argument when calling :func:`parse_int` or :func:`parse_real` for *config_value*. :param settings_value: value to be compared with *config_value*. :param config_value: value to be compared with *settings_value*. :returns: ``True`` if *settings_value* is equivalent to *config_value* when both are parsed as *vartype*. 
:Example: >>> compare_values('enum', None, 'remote_write', 'REMOTE_WRITE') True >>> compare_values('string', None, 'remote_write', 'REMOTE_WRITE') False >>> compare_values('real', None, '1e-06', 0.000001) True >>> compare_values('integer', 'MB', '6GB', '6GB') False >>> compare_values('integer', None, '6GB', '6GB') False >>> compare_values('integer', '16384kB', '64', ' 0x400 MB ') True >>> compare_values('integer', '2MB', 524288, '1TB') True >>> compare_values('integer', 'MB', 1048576, '1TB') True >>> compare_values('integer', 'kB', 4098, '4097.5kB') True """ converters: Dict[str, Callable[[str, Optional[str]], Union[None, bool, int, float, str]]] = { 'bool': lambda v1, v2: parse_bool(v1), 'integer': parse_int, 'real': parse_real, 'enum': lambda v1, v2: str(v1).lower(), 'string': lambda v1, v2: str(v1) } converter = converters.get(vartype) or converters['string'] old_converted = converter(settings_value, None) new_converted = converter(config_value, unit) return old_converted is not None and new_converted is not None and old_converted == new_converted def _sleep(interval: Union[int, float]) -> None: """Wrap :func:`~time.sleep`. :param interval: Delay execution for a given number of seconds. The argument may be a floating point number for subsecond precision. """ time.sleep(interval) def read_stripped(file_path: str) -> Iterator[str]: """Iterate over stripped lines in the given file. :param file_path: path to the file to read from :yields: each line from the given file stripped """ with open(file_path) as f: for line in f: yield line.strip() class RetryFailedError(PatroniException): """Maximum number of attempts exhausted in retry operation.""" class Retry(object): """Helper for retrying a method in the face of retryable exceptions. :ivar max_tries: how many times to retry the command. :ivar delay: initial delay between retry attempts. :ivar backoff: backoff multiplier between retry attempts. 
class Retry(object):
    """Helper for retrying a method in the face of retryable exceptions.

    :ivar max_tries: how many times to retry the command. ``-1`` means infinite tries.
    :ivar delay: initial delay between retry attempts.
    :ivar backoff: backoff multiplier between retry attempts.
    :ivar max_jitter: additional max jitter period to wait between retry attempts to avoid slamming the server.
        Stored internally as an :class:`int` of hundredths of a second.
    :ivar max_delay: maximum delay in seconds, regardless of other backoff settings.
    :ivar sleep_func: function used to introduce artificial delays.
    :ivar deadline: timeout for operation retries.
    :ivar retry_exceptions: single exception or tuple of exceptions that should trigger a retry.
    """

    def __init__(self, max_tries: Optional[int] = 1, delay: float = 0.1, backoff: int = 2,
                 max_jitter: float = 0.8, max_delay: int = 3600,
                 sleep_func: Callable[[Union[int, float]], None] = _sleep,
                 deadline: Optional[Union[int, float]] = None,
                 retry_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]] = PatroniException) -> None:
        """Create a :class:`Retry` instance for retrying function calls.

        :param max_tries: how many times to retry the command. ``-1`` means infinite tries.
        :param delay: initial delay between retry attempts.
        :param backoff: backoff multiplier between retry attempts. Defaults to ``2`` for exponential backoff.
        :param max_jitter: additional max jitter period to wait between retry attempts to avoid slamming the server.
        :param max_delay: maximum delay in seconds, regardless of other backoff settings.
        :param sleep_func: function used to introduce artificial delays.
        :param deadline: timeout for operation retries.
        :param retry_exceptions: single exception or tuple of exceptions that should trigger a retry.
        """
        self.max_tries = max_tries
        self.delay = delay
        self.backoff = backoff
        # jitter is kept as an integer number of hundredths of a second so randint() can be used in sleeptime
        self.max_jitter = int(max_jitter * 100)
        self.max_delay = float(max_delay)
        self._attempts = 0
        self._cur_delay = delay
        self.deadline = deadline
        self._cur_stoptime = None
        self.sleep_func = sleep_func
        self.retry_exceptions = retry_exceptions

    def reset(self) -> None:
        """Reset the attempt counter, delay and stop time."""
        self._attempts = 0
        self._cur_delay = self.delay
        self._cur_stoptime = None

    def copy(self) -> 'Retry':
        """Return a clone of this retry manager."""
        # max_jitter/max_delay are stored in converted form, hence the back-conversions here
        return Retry(max_tries=self.max_tries, delay=self.delay, backoff=self.backoff,
                     max_jitter=self.max_jitter / 100.0, max_delay=int(self.max_delay),
                     sleep_func=self.sleep_func, deadline=self.deadline, retry_exceptions=self.retry_exceptions)

    @property
    def sleeptime(self) -> float:
        """Get next cycle sleep time.

        It is based on the current delay plus a random jitter of up to ``max_jitter`` hundredths of a second.
        """
        return self._cur_delay + (random.randint(0, self.max_jitter) / 100.0)

    def update_delay(self) -> None:
        """Set next cycle delay.

        It will be the minimum value between:

            * current delay with ``backoff``; or
            * ``max_delay``.
        """
        self._cur_delay = min(self._cur_delay * self.backoff, self.max_delay)

    @property
    def stoptime(self) -> float:
        """Get the current stop time (``0`` if no stop time has been set yet)."""
        return self._cur_stoptime or 0

    def ensure_deadline(self, timeout: float, raise_ex: Optional[Exception] = None) -> bool:
        """Calculate, set, and check the remaining deadline time.

        :param timeout: if the *deadline* is smaller than the provided *timeout* value raise *raise_ex* exception.
        :param raise_ex: the exception object that will be raised if the *deadline* is smaller than provided
            *timeout*.

        :returns: ``False`` if *deadline* is smaller than a provided *timeout* and *raise_ex* isn't set.
            Otherwise ``True``.

        :raises:
            :class:`Exception`: *raise_ex* if the calculated deadline is smaller than provided *timeout*.
        """
        self.deadline = self.stoptime - time.time()
        if self.deadline < timeout:
            if raise_ex:
                raise raise_ex
            return False
        return True

    def __call__(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
        """Call a function *func* with arguments ``*args`` and ``*kwargs`` in a loop.

        *func* will be called until one of the following conditions is met:

            * It completes without throwing one of the configured ``retry_exceptions``; or
            * ``max_tries`` is exceeded; or
            * ``deadline`` is exceeded.

        .. note::
            * It will set loop stop time based on ``deadline`` attribute.
            * It will adjust delay on each cycle.

        :param func: function to call.
        :param args: positional arguments to call *func* with.
        :params kwargs: keyword arguments to call *func* with.

        :raises:
            :class:`RetryFailedError`:
                * If ``max_tries`` is exceeded; or
                * If ``deadline`` is exceeded.
        """
        self.reset()

        while True:
            try:
                # the stop time is only armed on the first attempt, so the whole loop shares one deadline
                if self.deadline is not None and self._cur_stoptime is None:
                    self._cur_stoptime = time.time() + self.deadline
                return func(*args, **kwargs)
            except self.retry_exceptions as e:
                # Note: max_tries == -1 means infinite tries.
                if self._attempts == self.max_tries:
                    logger.warning('Retry got exception: %s', e)
                    raise RetryFailedError("Too many retry attempts")
                self._attempts += 1
                # an exception may carry its own preferred back-off value (e.g. a server-provided hint)
                sleeptime = getattr(e, 'sleeptime', None)
                if not isinstance(sleeptime, (int, float)):
                    sleeptime = self.sleeptime

                if self._cur_stoptime is not None and time.time() + sleeptime >= self._cur_stoptime:
                    logger.warning('Retry got exception: %s', e)
                    raise RetryFailedError("Exceeded retry deadline")
                logger.debug('Retry got exception: %s', e)
                self.sleep_func(sleeptime)
                self.update_delay()
:param interval: for how long to sleep before returning a new value. :yields: current iteration counter, starting from ``0``. """ start_time = time.time() iteration = 0 end_time = start_time + timeout while time.time() < end_time: yield iteration iteration += 1 time.sleep(float(interval)) def split_host_port(value: str, default_port: Optional[int]) -> Tuple[str, int]: """Extract host(s) and port from *value*. :param value: string from where host(s) and port will be extracted. Accepts either of these formats: * ``host:port``; or * ``host1,host2,...,hostn:port``. Each ``host`` portion of *value* can be either: * A FQDN; or * An IPv4 address; or * An IPv6 address, with or without square brackets. :param default_port: if no port can be found in *param*, use *default_port* instead. :returns: the first item is composed of a CSV list of hosts from *value*, and the second item is either the port from *value* or *default_port*. :Example: >>> split_host_port('127.0.0.1', 5432) ('127.0.0.1', 5432) >>> split_host_port('127.0.0.1:5400', 5432) ('127.0.0.1', 5400) >>> split_host_port('127.0.0.1,192.168.0.101:5400', 5432) ('127.0.0.1,192.168.0.101', 5400) >>> split_host_port('127.0.0.1,www.mydomain.com,[fe80:0:0:0:213:72ff:fe3c:21bf], 0:0:0:0:0:0:0:0:5400', 5432) ('127.0.0.1,www.mydomain.com,fe80:0:0:0:213:72ff:fe3c:21bf,0:0:0:0:0:0:0:0', 5400) """ t = value.rsplit(':', 1) # If *value* contains ``:`` we consider it to be an IPv6 address, so we attempt to remove possible square brackets if ':' in t[0]: t[0] = ','.join([h.strip().strip('[]') for h in t[0].split(',')]) t.append(str(default_port)) return t[0], int(t[1]) def uri(proto: str, netloc: Union[List[str], Tuple[str, Union[int, str]], str], path: Optional[str] = '', user: Optional[str] = None) -> str: """Construct URI from given arguments. :param proto: the URI protocol. :param netloc: the URI host(s) and port. Can be specified in either way among * A :class:`list` or :class:`tuple`. 
def uri(proto: str, netloc: Union[List[str], Tuple[str, Union[int, str]], str], path: Optional[str] = '',
        user: Optional[str] = None) -> str:
    """Construct a URI from the given arguments.

    :param proto: the URI protocol.
    :param netloc: the URI host(s) and port. Either a 2-item :class:`list`/:class:`tuple` of
        host(s) and port, or a :class:`str` in the format ``host:port`` /
        ``host1,host2,...,hostn:port``. Each ``host`` portion may be a FQDN, an IPv4 address,
        or an IPv6 address (with or without square brackets).
    :param path: the URI path.
    :param user: the authenticating user, if any.

    :returns: the constructed URI.
    """
    if isinstance(netloc, (list, tuple)):
        host, port = netloc
    else:
        host, port = split_host_port(netloc, 0)
    # A ``:`` in the host means IPv6 — wrap in square brackets unless either bracket is already present.
    if host and ':' in host and not (host[0] == '[' or host[-1] == ']'):
        host = f'[{host}]'
    port_part = f':{port}' if port else ''
    user_part = f'{user}@' if user else ''
    if path and not path.startswith('/'):
        path = f'/{path}'
    return f'{proto}://{user_part}{host}{port_part}{path}'


def iter_response_objects(response: HTTPResponse) -> Iterator[Dict[str, Any]]:
    """Iterate over the chunks of a :class:`~urllib3.response.HTTPResponse`, yielding each JSON document found.

    Documents may span chunk boundaries, so any undecoded tail of a chunk is carried over and
    prepended to the next one.

    :param response: the HTTP response from which JSON documents will be retrieved.

    :yields: each decoded JSON document, as a dictionary.
    """
    decoder = JSONDecoder()
    leftover = ''
    for raw in response.read_chunked(decode_content=False):
        buffer = leftover + raw.decode('utf-8')
        size = len(buffer)
        # ``pos`` tracks where the next candidate JSON document starts, skipping leading whitespace.
        pos = WHITESPACE_RE.match(buffer, 0).end()  # pyright: ignore [reportOptionalMemberAccess]
        while pos < size:
            try:
                # ``document`` is the decoded JSON value; ``pos`` moves to the end of that document.
                document, pos = decoder.raw_decode(buffer, pos)
            except ValueError:
                # malformed or incomplete JSON: keep the tail for the next chunk
                break
            yield document
            pos = WHITESPACE_RE.match(buffer, pos).end()  # pyright: ignore [reportOptionalMemberAccess]
        # It is unusual for a chunk to hold more than one document, but carry any remainder just in case.
        leftover = buffer[pos:]
def cluster_as_json(cluster: 'Cluster', global_config: Optional['GlobalConfig'] = None) -> Dict[str, Any]:
    """Get a JSON representation of *cluster*.

    :param cluster: the :class:`~patroni.dcs.Cluster` object to be parsed as JSON.
    :param global_config: optional :class:`~patroni.config.GlobalConfig` object to check the cluster state.
        If not provided it will be instantiated from ``Cluster.config``.

    :returns: JSON representation of *cluster*. Possible keys, depending on the information
        available in *cluster*:

        * ``members``: list of member dicts sorted by ``name``; each may contain ``name``, ``role``
          (``leader``, ``standby_leader``, ``sync_standby``, or ``replica``), ``state``, ``api_url``,
          ``host``, ``port``, ``timeline``, ``pending_restart``, ``scheduled_restart``, ``tags``, and
          ``lag`` (an int, or ``unknown`` when the member's LSN is not available);
        * ``pause``: ``True`` if the cluster is in maintenance mode;
        * ``scheduled_switchover``: with ``at`` (timestamp), and optionally ``from`` (member to be
          demoted) and ``to`` (member to be promoted).
    """
    if not global_config:
        from patroni.config import get_global_config
        global_config = get_global_config(cluster)
    leader_name = cluster.leader.name if cluster.leader else None
    cluster_lsn = cluster.last_lsn or 0

    ret: Dict[str, Any] = {'members': []}
    for m in cluster.members:
        if m.name == leader_name:
            role = 'standby_leader' if global_config.is_standby_cluster else 'leader'
        elif cluster.sync.matches(m.name):
            role = 'sync_standby'
        else:
            role = 'replica'

        # For replicas prefer the fine-grained replication state, falling back to the generic member state.
        state = (m.data.get('replication_state', '') if role != 'leader' else '') or m.data.get('state', '')
        member = {'name': m.name, 'role': role, 'state': state, 'api_url': m.api_url}
        conn_kwargs = m.conn_kwargs()
        if conn_kwargs.get('host'):
            member['host'] = conn_kwargs['host']
            if conn_kwargs.get('port'):
                member['port'] = int(conn_kwargs['port'])
        optional_attributes = ('timeline', 'pending_restart', 'scheduled_restart', 'tags')
        member.update({n: m.data[n] for n in optional_attributes if n in m.data})

        if m.name != leader_name:
            lsn = m.lsn
            if lsn is None:
                member['lag'] = 'unknown'
            elif cluster_lsn >= lsn:
                member['lag'] = cluster_lsn - lsn
            else:
                # The member is ahead of the last known cluster LSN: report no lag rather than a negative value.
                member['lag'] = 0

        ret['members'].append(member)

    # sort members by name for consistency
    # NOTE: fixed annotation -- the key function returns the member name (a str), not a bool
    cmp: Callable[[Dict[str, Any]], str] = lambda m: m['name']
    ret['members'].sort(key=cmp)
    if global_config.is_paused:
        ret['pause'] = True
    if cluster.failover and cluster.failover.scheduled_at:
        ret['scheduled_switchover'] = {'at': cluster.failover.scheduled_at.isoformat()}
        if cluster.failover.leader:
            ret['scheduled_switchover']['from'] = cluster.failover.leader
        if cluster.failover.candidate:
            ret['scheduled_switchover']['to'] = cluster.failover.candidate
    return ret
def is_subpath(d1: str, d2: str) -> bool:
    """Check whether path *d2*, resolved relative to *d1*, stays inside *d1*.

    .. note::
        Neither path needs to exist; the paths are expanded and any symbolic links that do
        exist are resolved.

    :param d1: path to a directory.
    :param d2: path to be checked if it is within *d1*.

    :returns: ``True`` if *d2* resolves to a location inside *d1*.
    """
    base = os.path.realpath(d1) + os.path.sep
    target = os.path.realpath(os.path.join(base, d2))
    return os.path.commonprefix([base, target + os.path.sep]) == base


def validate_directory(d: str, msg: str = "{} {}") -> None:
    """Ensure that directory *d* exists and is writable, creating it if necessary.

    :param d: the directory to be checked.
    :param msg: template used to build the :class:`~patroni.exceptions.PatroniException` message;
        the first placeholder receives *d* and the second the failure reason.

    :raises:
        :class:`~patroni.exceptions.PatroniException`: if *d* cannot be created, exists but is not
            writable, or exists but is not a directory.
    """
    if not os.path.exists(d):
        try:
            os.makedirs(d)
        except OSError as e:
            logger.error(e)
            # A concurrent creation (EEXIST) is fine; anything else is fatal.
            if e.errno != errno.EEXIST:
                raise PatroniException(msg.format(d, "couldn't create the directory"))
        return
    if not os.path.isdir(d):
        raise PatroniException(msg.format(d, "is not a directory"))
    # Probe writability by creating and removing a temporary file inside the directory.
    try:
        fd, probe = tempfile.mkstemp(dir=d)
        os.close(fd)
        os.remove(probe)
    except OSError:
        raise PatroniException(msg.format(d, "the directory is not writable"))


def data_directory_is_empty(data_dir: str) -> bool:
    """Check if a PostgreSQL data directory is empty.

    .. note::
        In non-Windows environments *data_dir* is also considered empty if it only contains
        hidden files and/or a ``lost+found`` directory.

    :param data_dir: the PostgreSQL data directory to be checked.

    :returns: ``True`` if *data_dir* is empty.
    """
    if not os.path.exists(data_dir):
        return True
    posix = os.name != 'nt'
    return all(posix and (entry.startswith('.') or entry == 'lost+found') for entry in os.listdir(data_dir))


def keepalive_intvl(timeout: int, idle: int, cnt: int = 3) -> int:
    """Calculate the value to be used as ``TCP_KEEPINTVL`` based on *timeout*, *idle*, and *cnt*.

    :param timeout: value for ``TCP_USER_TIMEOUT``.
    :param idle: value for ``TCP_KEEPIDLE``.
    :param cnt: value for ``TCP_KEEPCNT``.

    :returns: the value to be used as ``TCP_KEEPINTVL`` (at least ``1``).
    """
    spread = float(timeout - idle) / cnt
    return max(1, int(spread))


def keepalive_socket_options(timeout: int, idle: int, cnt: int = 3) -> Iterator[Tuple[int, int, int]]:
    """Get all keepalive related options to be set on a socket.

    :param timeout: value for ``TCP_USER_TIMEOUT``.
    :param idle: value for ``TCP_KEEPIDLE``.
    :param cnt: value for ``TCP_KEEPCNT``.

    :yields: tuples of (protocol level, option, value). On Windows and other platforms only
        ``SO_KEEPALIVE`` is yielded; Linux additionally gets ``TCP_USER_TIMEOUT``; Linux and
        MacOS also get ``TCP_KEEPIDLE``, ``TCP_KEEPINTVL`` and ``TCP_KEEPCNT``.
    """
    yield (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

    linux = sys.platform.startswith('linux')
    darwin = sys.platform.startswith('darwin')
    if not (linux or darwin):
        return

    if linux:
        yield (socket.SOL_TCP, 18, int(timeout * 1000))  # TCP_USER_TIMEOUT

    # The MacOS socket constants from netinet/tcp.h are not exported by python's socket module,
    # therefore the raw 0x10, 0x101, 0x102 values are used as fallbacks on darwin.
    for attr, darwin_value, value in (('TCP_KEEPIDLE', 0x10, idle),
                                      ('TCP_KEEPINTVL', 0x101, keepalive_intvl(timeout, idle, cnt)),
                                      ('TCP_KEEPCNT', 0x102, cnt)):
        option = getattr(socket, attr, darwin_value if darwin else None)
        if option is not None:
            yield (socket.IPPROTO_TCP, option, value)


def enable_keepalive(sock: socket.socket, timeout: int, idle: int, cnt: int = 3) -> None:
    """Enable keepalive for *sock*, setting platform-appropriate options.

    .. note::
        The ``TCP_KEEPINTVL`` value is derived via :func:`keepalive_intvl` from *timeout*,
        *idle*, and *cnt*.

    :param sock: the socket for which keepalive will be enabled.
    :param timeout: value for ``TCP_USER_TIMEOUT``.
    :param idle: value for ``TCP_KEEPIDLE``.
    :param cnt: value for ``TCP_KEEPCNT``.

    :returns: output of :func:`~socket.ioctl` on Windows, nothing otherwise.
    """
    SIO_KEEPALIVE_VALS = getattr(socket, 'SIO_KEEPALIVE_VALS', None)
    if SIO_KEEPALIVE_VALS is not None:  # Windows
        interval = keepalive_intvl(timeout, idle, cnt)
        return sock.ioctl(SIO_KEEPALIVE_VALS, (1, idle * 1000, interval * 1000))
    for level, option, value in keepalive_socket_options(timeout, idle, cnt):
        sock.setsockopt(level, option, value)
:Examples: A *string* with quotes will have those quotes removed >>> unquote('"a quoted string"') 'a quoted string' A *string* with multiple quotes will be returned as is >>> unquote('"a multi" "quoted string"') '"a multi" "quoted string"' So will a *string* with unbalanced quotes >>> unquote('unbalanced "quoted string') 'unbalanced "quoted string' """ try: ret = split(string) ret = ret[0] if len(ret) == 1 else string except ValueError: ret = string return ret def get_major_version(bin_dir: Optional[str] = None, bin_name: str = 'postgres') -> str: """Get the major version of PostgreSQL. It is based on the output of ``postgres --version``. :param bin_dir: path to the PostgreSQL binaries directory. If ``None`` or an empty string, it will use the first *bin_name* binary that is found by the subprocess in the ``PATH``. :param bin_name: name of the postgres binary to call (``postgres`` by default) :returns: the PostgreSQL major version. :raises: :exc:`~patroni.exceptions.PatroniException`: if the postgres binary call failed due to :exc:`OSError`. :Example: * Returns `9.6` for PostgreSQL 9.6.24 * Returns `15` for PostgreSQL 15.2 """ if not bin_dir: binary = bin_name else: binary = os.path.join(bin_dir, bin_name) try: version = subprocess.check_output([binary, '--version']).decode() except OSError as e: raise PatroniException(f'Failed to get postgres version: {e}') version = re.match(r'^[^\s]+ [^\s]+ (\d+)(\.(\d+))?', version) if TYPE_CHECKING: # pragma: no cover assert version is not None return '.'.join([version.group(1), version.group(3)]) if int(version.group(1)) < 10 else version.group(1) patroni-3.2.2/patroni/validator.py000066400000000000000000001435141455170150700172220ustar00rootroot00000000000000#!/usr/bin/env python3 """Patroni configuration validation helpers. This module contains facilities for validating configuration of Patroni processes. :var schema: configuration schema of the daemon launched by ``patroni`` command. 
""" import os import shutil import socket from typing import Any, Dict, Union, Iterator, List, Optional as OptionalType, Tuple, TYPE_CHECKING from .collections import CaseInsensitiveSet from .dcs import dcs_modules from .exceptions import ConfigParseError from .utils import parse_int, split_host_port, data_directory_is_empty, get_major_version def data_directory_empty(data_dir: str) -> bool: """Check if PostgreSQL data directory is empty. :param data_dir: path to the PostgreSQL data directory to be checked. :returns: ``True`` if the data directory is empty. """ if os.path.isfile(os.path.join(data_dir, "global", "pg_control")): return False return data_directory_is_empty(data_dir) def validate_connect_address(address: str) -> bool: """Check if options related to connection address were properly configured. :param address: address to be validated in the format ``host:ip``. :returns: ``True`` if the address is valid. :raises: :class:`~patroni.exceptions.ConfigParseError`: * If the address is not in the expected format; or * If the host is set to not allowed values (``127.0.0.1``, ``0.0.0.0``, ``*``, ``::1``, or ``localhost``). """ try: host, _ = split_host_port(address, 1) except (AttributeError, TypeError, ValueError): raise ConfigParseError("contains a wrong value") if host in ["127.0.0.1", "0.0.0.0", "*", "::1", "localhost"]: raise ConfigParseError('must not contain "127.0.0.1", "0.0.0.0", "*", "::1", "localhost"') return True def validate_host_port(host_port: str, listen: bool = False, multiple_hosts: bool = False) -> bool: """Check if host(s) and port are valid and available for usage. :param host_port: the host(s) and port to be validated. It can be in either of these formats: * ``host:ip``, if *multiple_hosts* is ``False``; or * ``host_1,host_2,...,host_n:port``, if *multiple_hosts* is ``True``. :param listen: if the address is expected to be available for binding. 
``False`` means it expects to connect to that address, and ``True`` that it expects to bind to that address. :param multiple_hosts: if *host_port* can contain multiple hosts. :returns: ``True`` if the host(s) and port are valid. :raises: :class:`~patroni.exceptions.ConfigParseError`: * If the *host_port* is not in the expected format; or * If ``*`` was specified along with more hosts in *host_port*; or * If we are expecting to bind to an address that is already in use; or * If we are not able to connect to an address that we are expecting to do so; or * If :class:`~socket.gaierror` is thrown by socket module when attempting to connect to the given address(es). """ try: hosts, port = split_host_port(host_port, 1) except (ValueError, TypeError): raise ConfigParseError("contains a wrong value") else: if multiple_hosts: hosts = hosts.split(",") else: hosts = [hosts] if "*" in hosts: if len(hosts) != 1: raise ConfigParseError("expecting '*' alone") # If host is set to "*" get all hostnames and/or IP addresses that the host would be able to listen to hosts = [p[-1][0] for p in socket.getaddrinfo(None, port, 0, socket.SOCK_STREAM, 0, socket.AI_PASSIVE)] for host in hosts: # Check if "socket.IF_INET" or "socket.IF_INET6" is being used and instantiate a socket with the identified # protocol proto = socket.getaddrinfo(host, "", 0, socket.SOCK_STREAM, 0, socket.AI_PASSIVE) s = socket.socket(proto[0][0], socket.SOCK_STREAM) try: if s.connect_ex((host, port)) == 0: if listen: raise ConfigParseError("Port {} is already in use.".format(port)) elif not listen: raise ConfigParseError("{} is not reachable".format(host_port)) except socket.gaierror as e: raise ConfigParseError(e) finally: s.close() return True def validate_host_port_list(value: List[str]) -> bool: """Validate a list of host(s) and port items. Call :func:`validate_host_port` with each item in *value*. :param value: list of host(s) and port items to be validated. :returns: ``True`` if all items are valid. 
""" assert all([validate_host_port(v) for v in value]), "didn't pass the validation" return True def comma_separated_host_port(string: str) -> bool: """Validate a list of host and port items. Call :func:`validate_host_port_list` with a list represented by the CSV *string*. :param string: comma-separated list of host and port items. :returns: ``True`` if all items in the CSV string are valid. """ return validate_host_port_list([s.strip() for s in string.split(",")]) def validate_host_port_listen(host_port: str) -> bool: """Check if host and port are valid and available for binding. Call :func:`validate_host_port` with *listen* set to ``True``. :param host_port: the host and port to be validated. Must be in the format ``host:ip``. :returns: ``True`` if the host and port are valid and available for binding. """ return validate_host_port(host_port, listen=True) def validate_host_port_listen_multiple_hosts(host_port: str) -> bool: """Check if host(s) and port are valid and available for binding. Call :func:`validate_host_port` with both *listen* and *multiple_hosts* set to ``True``. :param host_port: the host(s) and port to be validated. It can be in either of these formats * ``host:ip``; or * ``host_1,host_2,...,host_n:port`` :returns: ``True`` if the host(s) and port are valid and available for binding. """ return validate_host_port(host_port, listen=True, multiple_hosts=True) def is_ipv4_address(ip: str) -> bool: """Check if *ip* is a valid IPv4 address. :param ip: the IP to be checked. :returns: ``True`` if the IP is an IPv4 address. :raises: :class:`~patroni.exceptions.ConfigParseError`: if *ip* is not a valid IPv4 address. """ try: socket.inet_aton(ip) except Exception: raise ConfigParseError("Is not a valid ipv4 address") return True def is_ipv6_address(ip: str) -> bool: """Check if *ip* is a valid IPv6 address. :param ip: the IP to be checked. :returns: ``True`` if the IP is an IPv6 address. 
:raises: :class:`~patroni.exceptions.ConfigParseError`: if *ip* is not a valid IPv6 address. """ try: socket.inet_pton(socket.AF_INET6, ip) except Exception: raise ConfigParseError("Is not a valid ipv6 address") return True def get_bin_name(bin_name: str) -> str: """Get the value of ``postgresql.bin_name[*bin_name*]`` configuration option. :param bin_name: a key to be retrieved from ``postgresql.bin_name`` configuration. :returns: value of ``postgresql.bin_name[*bin_name*]``, if present, otherwise *bin_name*. """ if TYPE_CHECKING: # pragma: no cover assert isinstance(schema.data, dict) return (schema.data.get('postgresql', {}).get('bin_name', {}) or {}).get(bin_name, bin_name) def validate_data_dir(data_dir: str) -> bool: """Validate the value of ``postgresql.data_dir`` configuration option. It requires that ``postgresql.data_dir`` is set and match one of following conditions: * Point to a path that does not exist yet; or * Point to an empty directory; or * Point to a non-empty directory that seems to contain a valid PostgreSQL data directory. :param data_dir: the value of ``postgresql.data_dir`` configuration option. :returns: ``True`` if the PostgreSQL data directory is valid. 
:raises: :class:`~patroni.exceptions.ConfigParseError`: * If no *data_dir* was given; or * If *data_dir* is a file and not a directory; or * If *data_dir* is a non-empty directory and: * ``PG_VERSION`` file is not available in the directory * ``pg_wal``/``pg_xlog`` is not available in the directory * ``PG_VERSION`` content does not match the major version reported by ``postgres --version`` """ if not data_dir: raise ConfigParseError("is an empty string") elif os.path.exists(data_dir) and not os.path.isdir(data_dir): raise ConfigParseError("is not a directory") elif not data_directory_empty(data_dir): if not os.path.exists(os.path.join(data_dir, "PG_VERSION")): raise ConfigParseError("doesn't look like a valid data directory") else: with open(os.path.join(data_dir, "PG_VERSION"), "r") as version: pgversion = version.read().strip() waldir = ("pg_wal" if float(pgversion) >= 10 else "pg_xlog") if not os.path.isdir(os.path.join(data_dir, waldir)): raise ConfigParseError("data dir for the cluster is not empty, but doesn't contain" " \"{}\" directory".format(waldir)) if TYPE_CHECKING: # pragma: no cover assert isinstance(schema.data, dict) bin_dir = schema.data.get("postgresql", {}).get("bin_dir", None) major_version = get_major_version(bin_dir, get_bin_name('postgres')) if pgversion != major_version: raise ConfigParseError("data_dir directory postgresql version ({}) doesn't match with " "'postgres --version' output ({})".format(pgversion, major_version)) return True def validate_binary_name(bin_name: str) -> bool: """Validate the value of ``postgresql.binary_name[*bin_name*]`` configuration option. 
If ``postgresql.bin_dir`` is set and the value of the *bin_name* meets these conditions: * The path join of ``postgresql.bin_dir`` plus the *bin_name* value exists; and * The path join as above is executable If ``postgresql.bin_dir`` is not set, then validate that the value of *bin_name* meets this condition: * Is found in the system PATH using ``which`` :param bin_name: the value of the ``postgresql.bin_name[*bin_name*]`` :returns: ``True`` if the conditions are true :raises: :class:`~patroni.exceptions.ConfigParseError` if: * *bin_name* is not set; or * the path join of the ``postgresql.bin_dir`` plus *bin_name* does not exist; or * the path join as above is not executable; or * the *bin_name* cannot be found in the system PATH """ if not bin_name: raise ConfigParseError("is an empty string") if TYPE_CHECKING: # pragma: no cover assert isinstance(schema.data, dict) bin_dir = schema.data.get('postgresql', {}).get('bin_dir', None) if not shutil.which(bin_name, path=bin_dir): raise ConfigParseError(f"does not contain '{bin_name}' in '{bin_dir or '$PATH'}'") return True class Result(object): """Represent the result of a given validation that was performed. :ivar status: If the validation succeeded. :ivar path: YAML tree path of the configuration option. :ivar data: value of the configuration option. :ivar level: error level, in case of error. :ivar error: error message if the validation failed, otherwise ``None``. """ def __init__(self, status: bool, error: OptionalType[str] = "didn't pass validation", level: int = 0, path: str = "", data: Any = "") -> None: """Create a :class:`Result` object based on the given arguments. .. note:: ``error`` attribute is only set if *status* is failed. :param status: if the validation succeeded. :param error: error message related to the validation that was performed, if the validation failed. :param level: error level, in case of error. :param path: YAML tree path of the configuration option. 
:param data: value of the configuration option. """ self.status = status self.path = path self.data = data self.level = level self._error = error if not self.status: self.error = error else: self.error = None def __repr__(self) -> str: """Show configuration path and value. If the validation failed, also show the error message.""" return str(self.path) + (" " + str(self.data) + " " + str(self._error) if self.error else "") class Case(object): """Map how a list of available configuration options should be validated. .. note:: It should be used together with an :class:`Or` object. The :class:`Or` object will define the list of possible configuration options in a given context, and the :class:`Case` object will dictate how to validate each of them, if they are set. """ def __init__(self, schema: Dict[str, Any]) -> None: """Create a :class:`Case` object. :param schema: the schema for validating a set of attributes that may be available in the configuration. Each key is the configuration that is available in a given scope and that should be validated, and the related value is the validation function or expected type. :Example: .. code-block:: python Case({ "host": validate_host_port, "url": str, }) That will check that ``host`` configuration, if given, is valid based on :func:`validate_host_port`, and will also check that ``url`` configuration, if given, is a ``str`` instance. """ self._schema = schema class Or(object): """Represent the list of options that are available. It can represent either a list of configuration options that are available in a given scope, or a list of validation functions and/or expected types for a given configuration option. """ def __init__(self, *args: Any) -> None: """Create an :class:`Or` object. :param `*args`: any arguments that the caller wants to be stored in this :class:`Or` object. :Example: .. 
code-block:: python Or("host", "hosts"): Case({ "host": validate_host_port, "hosts": Or(comma_separated_host_port, [validate_host_port]), }) The outer :class:`Or` is used to define that ``host`` and ``hosts`` are possible options in this scope. The inner :class`Or` in the ``hosts`` key value is used to define that ``hosts`` option is valid if either of :func:`comma_separated_host_port` or :func:`validate_host_port` succeed to validate it. """ self.args = args class AtMostOne(object): """Mark that at most one option from a :class:`Case` can be suplied. Represents a list of possible configuration options in a given scope, where at most one can actually be provided. .. note:: It should be used together with a :class:`Case` object. """ def __init__(self, *args: str) -> None: """Create a :class`AtMostOne` object. :param `*args`: any arguments that the caller wants to be stored in this :class:`Or` object. :Example: .. code-block:: python AtMostOne("nofailover", "failover_priority"): Case({ "nofailover": bool, "failover_priority": IntValidator(min=0, raise_assert=True), }) The :class`AtMostOne` object is used to define that at most one of ``nofailover`` and ``failover_priority`` can be provided. """ self.args = args class Optional(object): """Mark a configuration option as optional. :ivar name: name of the configuration option. :ivar default: value to set if the configuration option is not explicitly provided """ def __init__(self, name: str, default: OptionalType[Any] = None) -> None: """Create an :class:`Optional` object. :param name: name of the configuration option. :param default: value to set if the configuration option is not explicitly provided """ self.name = name self.default = default class Directory(object): """Check if a directory contains the expected files. The attributes of objects of this class are used by their :func:`validate` method. :param contains: list of paths that should exist relative to a given directory. 
:param contains_executable: list of executable files that should exist directly under a given directory. """ def __init__(self, contains: OptionalType[List[str]] = None, contains_executable: OptionalType[List[str]] = None) -> None: """Create a :class:`Directory` object. :param contains: list of paths that should exist relative to a given directory. :param contains_executable: list of executable files that should exist directly under a given directory. """ self.contains = contains self.contains_executable = contains_executable def _check_executables(self, path: OptionalType[str] = None) -> Iterator[Result]: """Check that all executables from contains_executable list exist within the given directory or within ``PATH``. :param path: optional path to the base directory against which executables will be validated. If not provided, check within ``PATH``. :yields: objects with the error message containing the name of the executable, if any check fails. """ for program in self.contains_executable or []: if not shutil.which(program, path=path): yield Result(False, f"does not contain '{program}' in '{(path or '$PATH')}'") def validate(self, name: str) -> Iterator[Result]: """Check if the expected paths and executables can be found under *name* directory. :param name: path to the base directory against which paths and executables will be validated. Check against ``PATH`` if name is not provided. :yields: objects with the error message related to the failure, if any check fails. 
""" if not name: yield from self._check_executables() elif not os.path.exists(name): yield Result(False, "Directory '{}' does not exist.".format(name)) elif not os.path.isdir(name): yield Result(False, "'{}' is not a directory.".format(name)) else: if self.contains: for path in self.contains: if not os.path.exists(os.path.join(name, path)): yield Result(False, "'{}' does not contain '{}'".format(name, path)) yield from self._check_executables(path=name) class BinDirectory(Directory): """Check if a Postgres binary directory contains the expected files. It is a subclass of :class:`Directory` with an extended capability: translating ``BINARIES`` according to configured ``postgresql.bin_name``, if any. :cvar BINARIES: list of executable files that should exist directly under a given Postgres binary directory. """ # ``pg_rewind`` is not in the list because its usage by Patroni is optional. Also, it is not available by default on # Postgres 9.3 and 9.4, versions which Patroni supports. BINARIES = ["pg_ctl", "initdb", "pg_controldata", "pg_basebackup", "postgres", "pg_isready"] def validate(self, name: str) -> Iterator[Result]: """Check if the expected executables can be found under *name* binary directory. :param name: path to the base directory against which executables will be validated. Check against PATH if *name* is not provided. :yields: objects with the error message related to the failure, if any check fails. """ self.contains_executable: List[str] = [get_bin_name(binary) for binary in self.BINARIES] yield from super().validate(name) class Schema(object): """Define a configuration schema. It contains all the configuration options that are available in each scope, including the validation(s) that should be performed against each one of them. The validations will be performed whenever the :class:`Schema` object is called, or its :func:`validate` method is called. :ivar validator: validator of the configuration schema. 
Can be any of these: * :class:`str`: defines that a string value is required; or * :class:`type`: any subclass of :class:`type`, defines that a value of the given type is required; or * ``callable``: any callable object, defines that validation will follow the code defined in the callable object. If the callable object contains an ``expected_type`` attribute, then it will check if the configuration value is of the expected type before calling the code of the callable object; or * :class:`list`: list representing one or more values in the configuration; or * :class:`dict`: dictionary representing the YAML configuration tree. """ def __init__(self, validator: Union[Dict[Any, Any], List[Any], Any]) -> None: """Create a :class:`Schema` object. .. note:: This class is expected to be initially instantiated with a :class:`dict` based *validator* argument. The idea is that dict represents the full YAML tree of configuration options. The :func:`validate` method will then walk recursively through the configuration tree, creating new instances of :class:`Schema` with the new "base path", to validate the structure and the leaf values of the tree. The recursion stops on leaf nodes, when it performs checks of the actual setting values. :param validator: validator of the configuration schema. Can be any of these: * :class:`str`: defines that a string value is required; or * :class:`type`: any subclass of :class:`type`, defines that a value of the given type is required; or * ``callable``: Any callable object, defines that validation will follow the code defined in the callable object. If the callable object contains an ``expected_type`` attribute, then it will check if the configuration value is of the expected type before calling the code of the callable object; or * :class:`list`: list representing it expects to contain one or more values in the configuration; or * :class:`dict`: dictionary representing the YAML configuration tree. 
The first 3 items in the above list are here referenced as "base validators", which cause the recursion to stop. If *validator* is a :class:`dict`, then you should follow these rules: * For the keys it can be either: * A :class:`str` instance. It will be the name of the configuration option; or * An :class:`Optional` instance. The ``name`` attribute of that object will be the name of the configuration option, and that class makes this configuration option as optional to the user, allowing it to not be specified in the YAML; or * An :class:`Or` instance. The ``args`` attribute of that object will contain a tuple of configuration option names. At least one of them should be specified by the user in the YAML; * For the values it can be either: * A new :class:`dict` instance. It will represent a new level in the YAML configuration tree; or * A :class:`Case` instance. This is required if the key of this value is an :class:`Or` instance, and the :class:`Case` instance is used to map each of the ``args`` in :class:`Or` to their corresponding base validator in :class:`Case`; or * An :class:`Or` instance with one or more base validators; or * A :class:`list` instance with a single item which is the base validator; or * A base validator. :Example: .. 
code-block:: python Schema({ "application_name": str, "bind": { "host": validate_host, "port": int, }, "aliases": [str], Optional("data_directory"): "/var/lib/myapp", Or("log_to_file", "log_to_db"): Case({ "log_to_file": bool, "log_to_db": bool, }), "version": Or(int, float), }) This sample schema defines that your YAML configuration follows these rules: * It must contain an ``application_name`` entry which value should be a :class:`str` instance; * It must contain a ``bind.host`` entry which value should be valid as per function ``validate_host``; * It must contain a ``bind.port`` entry which value should be an :class:`int` instance; * It must contain a ``aliases`` entry which value should be a :class:`list` of :class:`str` instances; * It may optionally contain a ``data_directory`` entry, with a value which should be a string; * It must contain at least one of ``log_to_file`` or ``log_to_db``, with a value which should be a :class:`bool` instance; * It must contain a ``version`` entry which value should be either an :class:`int` or a :class:`float` instance. """ self.validator = validator def __call__(self, data: Any) -> List[str]: """Perform validation of data using the rules defined in this schema. :param data: configuration to be validated against ``validator``. :returns: list of errors identified while validating the *data*, if any. """ errors: List[str] = [] for i in self.validate(data): if not i.status: errors.append(str(i)) return errors def validate(self, data: Union[Dict[Any, Any], Any]) -> Iterator[Result]: """Perform all validations from the schema against the given configuration. It first checks that *data* argument type is compliant with the type of ``validator`` attribute. Additionally: * If ``validator`` attribute is a callable object, calls it to validate *data* argument. Before doing so, if `validator` contains an ``expected_type`` attribute, check if *data* argument is compliant with that expected type. 
* If ``validator`` attribute is an iterable object (:class:`dict`, :class:`list`, :class:`Directory` or :class:`Or`), then it iterates over it to validate each of the corresponding entries in *data* argument. :param data: configuration to be validated against ``validator``. :yields: objects with the error message related to the failure, if any check fails. """ self.data = data # New `Schema` objects can be created while validating a given `Schema`, depending on its structure. The first # 3 IF statements deal with the situation where we already reached a leaf node in the `Schema` structure, then # we are dealing with an actual value validation. The remaining logic in this method is used to iterate through # iterable objects in the structure, until we eventually reach a leaf node to validate its value. if isinstance(self.validator, str): yield Result(isinstance(self.data, str), "is not a string", level=1, data=self.data) elif isinstance(self.validator, type): yield Result(isinstance(self.data, self.validator), "is not {}".format(_get_type_name(self.validator)), level=1, data=self.data) elif callable(self.validator): if hasattr(self.validator, "expected_type"): if not isinstance(data, self.validator.expected_type): yield Result(False, "is not {}" .format(_get_type_name(self.validator.expected_type)), level=1, data=self.data) return try: self.validator(data) yield Result(True, data=self.data) except Exception as e: yield Result(False, "didn't pass validation: {}".format(e), data=self.data) elif isinstance(self.validator, dict): if not isinstance(self.data, dict): yield Result(isinstance(self.data, dict), "is not a dictionary", level=1, data=self.data) elif isinstance(self.validator, list): if not isinstance(self.data, list): yield Result(isinstance(self.data, list), "is not a list", level=1, data=self.data) return yield from self.iter() def iter(self) -> Iterator[Result]: """Iterate over ``validator``, if it is an iterable object, to validate the corresponding entries 
in ``data``. Only :class:`dict`, :class:`list`, :class:`Directory` and :class:`Or` objects are considered iterable objects. :yields: objects with the error message related to the failure, if any check fails. """ if isinstance(self.validator, dict): if not isinstance(self.data, dict): yield Result(False, "is not a dictionary.", level=1) else: yield from self.iter_dict() elif isinstance(self.validator, list): if len(self.data) == 0: yield Result(False, "is an empty list", data=self.data) if self.validator: for key, value in enumerate(self.data): # Although the value in the configuration (`data`) is expected to contain 1 or more entries, only # the first validator defined in `validator` property list will be used. It is only defined as a # `list` in `validator` so this logic can understand that the value in `data` attribute should be a # `list`. For example: "pg_hba": [str] in `validator` attribute defines that "pg_hba" in `data` # attribute should contain a list with one or more `str` entries. for v in Schema(self.validator[0]).validate(value): yield Result(v.status, v.error, path=(str(key) + ("." + v.path if v.path else "")), level=v.level, data=value) elif isinstance(self.validator, Directory) and isinstance(self.data, str): yield from self.validator.validate(self.data) elif isinstance(self.validator, Or): yield from self.iter_or() def iter_dict(self) -> Iterator[Result]: """Iterate over a :class:`dict` based ``validator`` to validate the corresponding entries in ``data``. :yields: objects with the error message related to the failure, if any check fails. """ # One key in `validator` attribute (`key` variable) can be mapped to one or more keys in `data` attribute (`d` # variable), depending on the `key` type. 
if TYPE_CHECKING: # pragma: no cover assert isinstance(self.validator, dict) assert isinstance(self.data, dict) for key in self.validator.keys(): if isinstance(key, AtMostOne) and len(list(self._data_key(key))) > 1: yield Result(False, f"Multiple of {key.args} provided") continue for d in self._data_key(key): if d not in self.data and not isinstance(key, Optional): yield Result(False, "is not defined.", path=d) elif d not in self.data and isinstance(key, Optional) and key.default is None: continue else: if d not in self.data and isinstance(key, Optional): self.data[d] = key.default validator = self.validator[key] if isinstance(key, (Or, AtMostOne)) and isinstance(self.validator[key], Case): validator = self.validator[key]._schema[d] # In this loop we may be calling a new `Schema` either over an intermediate node in the tree, or # over a leaf node. In the latter case the recursive calls in the given path will finish. for v in Schema(validator).validate(self.data[d]): yield Result(v.status, v.error, path=(d + ("." + v.path if v.path else "")), level=v.level, data=v.data) def iter_or(self) -> Iterator[Result]: """Perform all validations defined in an :class:`Or` object for a given configuration option. This method can be only called against leaf nodes in the configuration tree. :class:`Or` objects defined in the ``validator`` keys will be handled by :func:`iter_dict` method. :yields: objects with the error message related to the failure, if any check fails. """ if TYPE_CHECKING: # pragma: no cover assert isinstance(self.validator, Or) results: List[Result] = [] for a in self.validator.args: r: List[Result] = [] # Each of the `Or` validators can throw 0 to many `Result` instances. for v in Schema(a).validate(self.data): r.append(v) if any([x.status for x in r]) and not all([x.status for x in r]): results += [x for x in r if not x.status] else: results += r # None of the `Or` validators succeeded to validate `data`, so we report the issues back. 
if not any([x.status for x in results]): max_level = 3 for v in sorted(results, key=lambda x: x.level): if v.level > max_level: break max_level = v.level yield Result(v.status, v.error, path=v.path, level=v.level, data=v.data) def _data_key(self, key: Union[str, Optional, Or, AtMostOne]) -> Iterator[str]: """Map a key from the ``validator`` dictionary to the corresponding key(s) in the ``data`` dictionary. :param key: key from the ``validator`` attribute. :yields: keys that should be used to access corresponding value in the ``data`` attribute. """ # If the key was defined as a `str` object in `validator` attribute, then it is already the final key to access # the `data` dictionary. if isinstance(self.data, dict) and isinstance(key, str): yield key # If the key was defined as an `Optional` object in `validator` attribute, then its name is the key to access # the `data` dictionary. elif isinstance(key, Optional): yield key.name # If the key was defined as an `Or` object in `validator` attribute, then each of its values are the keys to # access the `data` dictionary. elif isinstance(key, Or) and isinstance(self.data, dict): # At least one of the `Or` entries should be available in the `data` dictionary. If we find at least one of # them in `data`, then we return all found entries so the caller method can validate them all. if any([item in self.data for item in key.args]): for item in key.args: if item in self.data: yield item # If none of the `Or` entries is available in the `data` dictionary, then we return all entries so the # caller method will issue errors that they are all absent. else: for item in key.args: yield item # If the key was defined as a `AtMostOne` object in `validator` attribute, then each of its values # are the keys to access the `data` dictionary. 
elif isinstance(key, AtMostOne) and isinstance(self.data, dict): # Yield back all of the entries from the `data` dictionary, each will be validated and then counted # to inform us if we've provided too many for item in key.args: if item in self.data: yield item def _get_type_name(python_type: Any) -> str: """Get a user-friendly name for a given Python type. :param python_type: Python type which user friendly name should be taken. :returns: User friendly name of the given Python type. """ types: Dict[Any, str] = {str: 'a string', int: 'an integer', float: 'a number', bool: 'a boolean', list: 'an array', dict: 'a dictionary'} return types.get(python_type, getattr(python_type, __name__, "unknown type")) def assert_(condition: bool, message: str = "Wrong value") -> None: """Assert that a given condition is ``True``. If the assertion fails, then throw a message. :param condition: result of a condition to be asserted. :param message: message to be thrown if the condition is ``False``. """ assert condition, message class IntValidator(object): """Validate an integer setting. :ivar min: minimum allowed value for the setting, if any. :ivar max: maximum allowed value for the setting, if any. :ivar base_unit: the base unit to convert the value to before checking if it's within *min* and *max* range. :ivar expected_type: the expected Python type. :ivar raise_assert: if an ``assert`` test should be performed regarding expected type and valid range. """ def __init__(self, min: OptionalType[int] = None, max: OptionalType[int] = None, base_unit: OptionalType[str] = None, expected_type: Any = None, raise_assert: bool = False) -> None: """Create an :class:`IntValidator` object with the given rules. :param min: minimum allowed value for the setting, if any. :param max: maximum allowed value for the setting, if any. :param base_unit: the base unit to convert the value to before checking if it's within *min* and *max* range. :param expected_type: the expected Python type. 
:param raise_assert: if an ``assert`` test should be performed regarding expected type and valid range. """ self.min = min self.max = max self.base_unit = base_unit if expected_type: self.expected_type = expected_type self.raise_assert = raise_assert def __call__(self, value: Any) -> bool: """Check if *value* is a valid integer and within the expected range. .. note:: If ``raise_assert`` is ``True`` and *value* is not valid, then an :class:`AssertionError` will be triggered. :param value: value to be checked against the rules defined for this :class:`IntValidator` instance. :returns: ``True`` if *value* is valid and within the expected range. """ value = parse_int(value, self.base_unit) ret = isinstance(value, int)\ and (self.min is None or value >= self.min)\ and (self.max is None or value <= self.max) if self.raise_assert: assert_(ret) return ret class EnumValidator(object): """Validate enum setting :ivar allowed_values: a ``set`` or ``CaseInsensitiveSet`` object with allowed enum values. :ivar raise_assert: if an ``assert`` call should be performed regarding expected type and valid range. """ def __init__(self, allowed_values: Tuple[str, ...], case_sensitive: bool = False, raise_assert: bool = False) -> None: """Create an :class:`EnumValidator` object with given allowed values. :param allowed_values: a tuple with allowed enum values :param case_sensitive: set to ``True`` to do case sensitive comparisons :param raise_assert: if an ``assert`` call should be performed regarding expected values. """ self.allowed_values = set(allowed_values) if case_sensitive else CaseInsensitiveSet(allowed_values) self.raise_assert = raise_assert def __call__(self, value: Any) -> bool: """Check if provided *value* could be found within *allowed_values*. .. note:: If ``raise_assert`` is ``True`` and *value* is not valid, then an ``AssertionError`` will be triggered. :param value: value to be checked. :returns: ``True`` if *value* could be found within *allowed_values*. 
""" ret = isinstance(value, str) and value in self.allowed_values if self.raise_assert: assert_(ret) return ret def validate_watchdog_mode(value: Any) -> None: """Validate ``watchdog.mode`` configuration option. :param value: value of ``watchdog.mode`` to be validated. """ assert_(isinstance(value, (str, bool)), "expected type is not a string") assert_(value in (False, "off", "automatic", "required")) userattributes = {"username": "", Optional("password"): ""} available_dcs = [m.split(".")[-1] for m in dcs_modules()] setattr(validate_host_port_list, 'expected_type', list) setattr(comma_separated_host_port, 'expected_type', str) setattr(validate_connect_address, 'expected_type', str) setattr(validate_host_port_listen, 'expected_type', str) setattr(validate_host_port_listen_multiple_hosts, 'expected_type', str) setattr(validate_data_dir, 'expected_type', str) setattr(validate_binary_name, 'expected_type', str) validate_etcd = { Or("host", "hosts", "srv", "srv_suffix", "url", "proxy"): Case({ "host": validate_host_port, "hosts": Or(comma_separated_host_port, [validate_host_port]), "srv": str, "srv_suffix": str, "url": str, "proxy": str }), Optional("protocol"): str, Optional("username"): str, Optional("password"): str, Optional("cacert"): str, Optional("cert"): str, Optional("key"): str } schema = Schema({ "name": str, "scope": str, Optional("ctl"): { Optional("insecure"): bool, Optional("cacert"): str, Optional("certfile"): str, Optional("keyfile"): str, Optional("keyfile_password"): str }, "restapi": { "listen": validate_host_port_listen, "connect_address": validate_connect_address, Optional("authentication"): { "username": str, "password": str }, Optional("certfile"): str, Optional("keyfile"): str, Optional("keyfile_password"): str, Optional("cafile"): str, Optional("ciphers"): str, Optional("verify_client"): EnumValidator(("none", "optional", "required"), case_sensitive=True, raise_assert=True), Optional("allowlist"): [str], Optional("allowlist_include_members"): 
bool, Optional("http_extra_headers"): dict, Optional("https_extra_headers"): dict, Optional("request_queue_size"): IntValidator(min=0, max=4096, expected_type=int, raise_assert=True) }, Optional("bootstrap"): { "dcs": { Optional("ttl"): IntValidator(min=20, raise_assert=True), Optional("loop_wait"): IntValidator(min=1, raise_assert=True), Optional("retry_timeout"): IntValidator(min=3, raise_assert=True), Optional("maximum_lag_on_failover"): IntValidator(min=0, raise_assert=True), Optional("maximum_lag_on_syncnode"): IntValidator(min=-1, raise_assert=True), Optional("postgresql"): { Optional("parameters"): { Optional("max_connections"): IntValidator(1, 262143, raise_assert=True), Optional("max_locks_per_transaction"): IntValidator(10, 2147483647, raise_assert=True), Optional("max_prepared_transactions"): IntValidator(0, 262143, raise_assert=True), Optional("max_replication_slots"): IntValidator(0, 262143, raise_assert=True), Optional("max_wal_senders"): IntValidator(0, 262143, raise_assert=True), Optional("max_worker_processes"): IntValidator(0, 262143, raise_assert=True), }, Optional("use_pg_rewind"): bool, Optional("pg_hba"): [str], Optional("pg_ident"): [str], Optional("pg_ctl_timeout"): IntValidator(min=0, raise_assert=True), Optional("use_slots"): bool, }, Optional("primary_start_timeout"): IntValidator(min=0, raise_assert=True), Optional("primary_stop_timeout"): IntValidator(min=0, raise_assert=True), Optional("standby_cluster"): { Or("host", "port", "restore_command"): Case({ "host": str, "port": IntValidator(max=65535, expected_type=int, raise_assert=True), "restore_command": str }), Optional("primary_slot_name"): str, Optional("create_replica_methods"): [str], Optional("archive_cleanup_command"): str, Optional("recovery_min_apply_delay"): str }, Optional("synchronous_mode"): bool, Optional("synchronous_mode_strict"): bool, Optional("synchronous_node_count"): IntValidator(min=1, raise_assert=True), }, Optional("initdb"): [Or(str, dict)], Optional("method"): 
str }, Or(*available_dcs): Case({ "consul": { Or("host", "url"): Case({ "host": validate_host_port, "url": str }), Optional("port"): IntValidator(max=65535, expected_type=int, raise_assert=True), Optional("scheme"): str, Optional("token"): str, Optional("verify"): bool, Optional("cacert"): str, Optional("cert"): str, Optional("key"): str, Optional("dc"): str, Optional("checks"): [str], Optional("register_service"): bool, Optional("service_tags"): [str], Optional("service_check_interval"): str, Optional("service_check_tls_server_name"): str, Optional("consistency"): EnumValidator(('default', 'consistent', 'stale'), case_sensitive=True, raise_assert=True) }, "etcd": validate_etcd, "etcd3": validate_etcd, "exhibitor": { "hosts": [str], "port": IntValidator(max=65535, expected_type=int, raise_assert=True), Optional("poll_interval"): IntValidator(min=1, expected_type=int, raise_assert=True), }, "raft": { "self_addr": validate_connect_address, Optional("bind_addr"): validate_host_port_listen, "partner_addrs": validate_host_port_list, Optional("data_dir"): str, Optional("password"): str }, "zookeeper": { "hosts": Or(comma_separated_host_port, [validate_host_port]), Optional("use_ssl"): bool, Optional("cacert"): str, Optional("cert"): str, Optional("key"): str, Optional("key_password"): str, Optional("verify"): bool, Optional("set_acls"): dict }, "kubernetes": { "labels": {}, Optional("bypass_api_service"): bool, Optional("namespace"): str, Optional("scope_label"): str, Optional("role_label"): str, Optional("leader_label_value"): str, Optional("follower_label_value"): str, Optional("standby_leader_label_value"): str, Optional("tmp_role_label"): str, Optional("use_endpoints"): bool, Optional("pod_ip"): Or(is_ipv4_address, is_ipv6_address), Optional("ports"): [{"name": str, "port": IntValidator(max=65535, expected_type=int, raise_assert=True)}], Optional("cacert"): str, Optional("retriable_http_codes"): Or(int, [int]), }, }), Optional("citus"): { "database": str, "group": 
IntValidator(min=0, expected_type=int, raise_assert=True), }, "postgresql": { "listen": validate_host_port_listen_multiple_hosts, "connect_address": validate_connect_address, Optional("proxy_address"): validate_connect_address, "authentication": { "replication": userattributes, "superuser": userattributes, Optional("rewind"): userattributes }, "data_dir": validate_data_dir, Optional("bin_name"): { Optional("pg_ctl"): validate_binary_name, Optional("initdb"): validate_binary_name, Optional("pg_controldata"): validate_binary_name, Optional("pg_basebackup"): validate_binary_name, Optional("postgres"): validate_binary_name, Optional("pg_isready"): validate_binary_name, Optional("pg_rewind"): validate_binary_name, }, Optional("bin_dir", ""): BinDirectory(), Optional("parameters"): { Optional("unix_socket_directories"): str }, Optional("pg_hba"): [str], Optional("pg_ident"): [str], Optional("pg_ctl_timeout"): IntValidator(min=0, raise_assert=True), Optional("use_pg_rewind"): bool }, Optional("watchdog"): { Optional("mode"): validate_watchdog_mode, Optional("device"): str, Optional("safety_margin"): IntValidator(min=-1, expected_type=int, raise_assert=True), }, Optional("tags"): { AtMostOne("nofailover", "failover_priority"): Case({ "nofailover": bool, "failover_priority": IntValidator(min=0, expected_type=int, raise_assert=True), }), Optional("clonefrom"): bool, Optional("noloadbalance"): bool, Optional("replicatefrom"): str, Optional("nosync"): bool } }) patroni-3.2.2/patroni/version.py000066400000000000000000000002001455170150700167020ustar00rootroot00000000000000"""This module specifies the current Patroni version. :var __version__: the current Patroni version. 
""" __version__ = '3.2.2' patroni-3.2.2/patroni/watchdog/000077500000000000000000000000001455170150700164535ustar00rootroot00000000000000patroni-3.2.2/patroni/watchdog/__init__.py000066400000000000000000000001421455170150700205610ustar00rootroot00000000000000from patroni.watchdog.base import WatchdogError, Watchdog __all__ = ['WatchdogError', 'Watchdog'] patroni-3.2.2/patroni/watchdog/base.py000066400000000000000000000305001455170150700177350ustar00rootroot00000000000000import abc import logging import platform import sys from threading import RLock from typing import Any, Callable, Dict, Optional, Union from ..config import Config from ..exceptions import WatchdogError __all__ = ['WatchdogError', 'Watchdog'] logger = logging.getLogger(__name__) MODE_REQUIRED = 'required' # Will not run if a watchdog is not available MODE_AUTOMATIC = 'automatic' # Will use a watchdog if one is available MODE_OFF = 'off' # Will not try to use a watchdog def parse_mode(mode: Union[bool, str]) -> str: if mode is False: return MODE_OFF mode = str(mode).lower() if mode in ['require', 'required']: return MODE_REQUIRED elif mode in ['auto', 'automatic']: return MODE_AUTOMATIC else: if mode not in ['off', 'disable', 'disabled']: logger.warning("Watchdog mode {0} not recognized, disabling watchdog".format(mode)) return MODE_OFF def synchronized(func: Callable[..., Any]) -> Callable[..., Any]: def wrapped(self: 'Watchdog', *args: Any, **kwargs: Any) -> Any: with self.lock: return func(self, *args, **kwargs) return wrapped class WatchdogConfig(object): """Helper to contain a snapshot of configuration""" def __init__(self, config: Config) -> None: watchdog_config = config.get("watchdog") or {'mode': 'automatic'} self.mode = parse_mode(watchdog_config.get('mode', 'automatic')) self.ttl = config['ttl'] self.loop_wait = config['loop_wait'] self.safety_margin = watchdog_config.get('safety_margin', 5) self.driver = watchdog_config.get('driver', 'default') self.driver_config = dict((k, v) for k, v 
in watchdog_config.items() if k not in ['mode', 'safety_margin', 'driver']) def __eq__(self, other: Any) -> bool: return isinstance(other, WatchdogConfig) and \ all(getattr(self, attr) == getattr(other, attr) for attr in ['mode', 'ttl', 'loop_wait', 'safety_margin', 'driver', 'driver_config']) def __ne__(self, other: Any) -> bool: return not self == other def get_impl(self) -> 'WatchdogBase': if self.driver == 'testing': # pragma: no cover from patroni.watchdog.linux import TestingWatchdogDevice return TestingWatchdogDevice.from_config(self.driver_config) elif platform.system() == 'Linux' and self.driver == 'default': from patroni.watchdog.linux import LinuxWatchdogDevice return LinuxWatchdogDevice.from_config(self.driver_config) else: return NullWatchdog() @property def timeout(self) -> int: if self.safety_margin == -1: return int(self.ttl // 2) else: return self.ttl - self.safety_margin @property def timing_slack(self) -> int: return self.timeout - self.loop_wait class Watchdog(object): """Facade to dynamically manage watchdog implementations and handle config changes. When activation fails underlying implementation will be switched to a Null implementation. 
To avoid log spam activation will only be retried when watchdog configuration is changed.""" def __init__(self, config: Config) -> None: self.config = WatchdogConfig(config) self.active_config: WatchdogConfig = self.config self.lock = RLock() self.active = False if self.config.mode == MODE_OFF: self.impl = NullWatchdog() else: self.impl = self.config.get_impl() if self.config.mode == MODE_REQUIRED and self.impl.is_null: logger.error("Configuration requires a watchdog, but watchdog is not supported on this platform.") sys.exit(1) @synchronized def reload_config(self, config: Config) -> None: self.config = WatchdogConfig(config) # Turning a watchdog off can always be done immediately if self.config.mode == MODE_OFF: if self.active: self._disable() self.active_config = self.config self.impl = NullWatchdog() # If watchdog is not active we can apply config immediately to show any warnings early. Otherwise we need to # delay until next time a keepalive is sent so timeout matches up with leader key update. if not self.active: if self.config.driver != self.active_config.driver or \ self.config.driver_config != self.active_config.driver_config: self.impl = self.config.get_impl() self.active_config = self.config @synchronized def activate(self) -> bool: """Activates the watchdog device with suitable timeouts. While watchdog is active keepalive needs to be called every time loop_wait expires. :returns False if a safe watchdog could not be configured, but is required. 
""" self.active = True return self._activate() def _activate(self) -> bool: self.active_config = self.config if self.config.timing_slack < 0: logger.warning('Watchdog not supported because leader TTL {0} is less than 2x loop_wait {1}' .format(self.config.ttl, self.config.loop_wait)) self.impl = NullWatchdog() try: self.impl.open() actual_timeout = self._set_timeout() except WatchdogError as e: logger.warning("Could not activate %s: %s", self.impl.describe(), e) self.impl = NullWatchdog() actual_timeout = self.impl.get_timeout() if self.impl.is_running and not self.impl.can_be_disabled: logger.warning("Watchdog implementation can't be disabled." " Watchdog will trigger after Patroni loses leader key.") if not self.impl.is_running or actual_timeout and actual_timeout > self.config.timeout: if self.config.mode == MODE_REQUIRED: if self.impl.is_null: logger.error("Configuration requires watchdog, but watchdog could not be configured.") else: logger.error("Configuration requires watchdog, but a safe watchdog timeout {0} could" " not be configured. 
Watchdog timeout is {1}.".format( self.config.timeout, actual_timeout)) return False else: if not self.impl.is_null: logger.warning("Watchdog timeout {0} seconds does not ensure safe termination within {1} seconds" .format(actual_timeout, self.config.timeout)) if self.is_running: logger.info("{0} activated with {1} second timeout, timing slack {2} seconds" .format(self.impl.describe(), actual_timeout, self.config.timing_slack)) else: if self.config.mode == MODE_REQUIRED: logger.error("Configuration requires watchdog, but watchdog could not be activated") return False return True def _set_timeout(self) -> Optional[int]: if self.impl.has_set_timeout(): self.impl.set_timeout(self.config.timeout) # Safety checks for watchdog implementations that don't support configurable timeouts actual_timeout = self.impl.get_timeout() if self.impl.is_running and actual_timeout < self.config.loop_wait: logger.error('loop_wait of {0} seconds is too long for watchdog {1} second timeout' .format(self.config.loop_wait, actual_timeout)) if self.impl.can_be_disabled: logger.info('Disabling watchdog due to unsafe timeout.') self.impl.close() self.impl = NullWatchdog() return None return actual_timeout @synchronized def disable(self) -> None: self._disable() self.active = False def _disable(self) -> None: try: if self.impl.is_running and not self.impl.can_be_disabled: # Give sysadmin some extra time to clean stuff up. self.impl.keepalive() logger.warning("Watchdog implementation can't be disabled. System will reboot after " "{0} seconds when watchdog times out.".format(self.impl.get_timeout())) self.impl.close() except WatchdogError as e: logger.error("Error while disabling watchdog: %s", e) @synchronized def keepalive(self) -> None: try: if self.active: self.impl.keepalive() # In case there are any pending configuration changes apply them now. 
if self.active and self.config != self.active_config: if self.config.mode != MODE_OFF and self.active_config.mode == MODE_OFF: self.impl = self.config.get_impl() self._activate() if self.config.driver != self.active_config.driver \ or self.config.driver_config != self.active_config.driver_config: self._disable() self.impl = self.config.get_impl() self._activate() if self.config.timeout != self.active_config.timeout: self.impl.set_timeout(self.config.timeout) if self.is_running: logger.info("{0} updated with {1} second timeout, timing slack {2} seconds" .format(self.impl.describe(), self.impl.get_timeout(), self.config.timing_slack)) self.active_config = self.config except WatchdogError as e: logger.error("Error while sending keepalive: %s", e) @property @synchronized def is_running(self) -> bool: return self.impl.is_running @property @synchronized def is_healthy(self) -> bool: if self.config.mode != MODE_REQUIRED: return True return self.config.timing_slack >= 0 and self.impl.is_healthy class WatchdogBase(abc.ABC): """A watchdog object when opened requires periodic calls to keepalive. When keepalive is not called within a timeout the system will be terminated.""" is_null = False @property def is_running(self) -> bool: """Returns True when watchdog is activated and capable of performing it's task.""" return False @property def is_healthy(self) -> bool: """Returns False when calling open() is known to fail.""" return False @property def can_be_disabled(self) -> bool: """Returns True when watchdog will be disabled by calling close(). Some watchdog devices will keep running no matter what once activated. May raise WatchdogError if called without calling open() first.""" return True @abc.abstractmethod def open(self) -> None: """Open watchdog device. When watchdog is opened keepalive must be called. 
Returns nothing on success or raises WatchdogError if the device could not be opened.""" @abc.abstractmethod def close(self) -> None: """Gracefully close watchdog device.""" @abc.abstractmethod def keepalive(self) -> None: """Resets the watchdog timer. Watchdog must be open when keepalive is called.""" @abc.abstractmethod def get_timeout(self) -> int: """Returns the current keepalive timeout in effect.""" def has_set_timeout(self) -> bool: """Returns True if setting a timeout is supported.""" return False def set_timeout(self, timeout: int) -> None: """Set the watchdog timer timeout. :param timeout: watchdog timeout in seconds""" raise WatchdogError("Setting timeout is not supported on {0}".format(self.describe())) def describe(self) -> str: """Human readable name for this device""" return self.__class__.__name__ @classmethod def from_config(cls, config: Dict[str, Any]) -> 'WatchdogBase': return cls() class NullWatchdog(WatchdogBase): """Null implementation when watchdog is not supported.""" is_null = True def open(self) -> None: return def close(self) -> None: return def keepalive(self) -> None: return def get_timeout(self) -> int: # A big enough number to not matter return 1000000000 patroni-3.2.2/patroni/watchdog/linux.py000066400000000000000000000205021455170150700201630ustar00rootroot00000000000000# pyright: reportConstantRedefinition=false import ctypes import os import platform from typing import Any, Dict, NamedTuple from .base import WatchdogBase, WatchdogError # Pythonification of linux/ioctl.h IOC_NONE = 0 IOC_WRITE = 1 IOC_READ = 2 IOC_NRBITS = 8 IOC_TYPEBITS = 8 IOC_SIZEBITS = 14 IOC_DIRBITS = 2 # Non-generic platform special cases machine = platform.machine() if machine in ['mips', 'sparc', 'powerpc', 'ppc64', 'ppc64le']: # pragma: no cover IOC_SIZEBITS = 13 IOC_DIRBITS = 3 IOC_NONE, IOC_WRITE = 1, 4 elif machine == 'parisc': # pragma: no cover IOC_WRITE, IOC_READ = 2, 1 IOC_NRSHIFT = 0 IOC_TYPESHIFT = IOC_NRSHIFT + IOC_NRBITS IOC_SIZESHIFT = 
IOC_TYPESHIFT + IOC_TYPEBITS IOC_DIRSHIFT = IOC_SIZESHIFT + IOC_SIZEBITS def IOW(type_: str, nr: int, size: int) -> int: return IOC(IOC_WRITE, type_, nr, size) def IOR(type_: str, nr: int, size: int) -> int: return IOC(IOC_READ, type_, nr, size) def IOWR(type_: str, nr: int, size: int) -> int: return IOC(IOC_READ | IOC_WRITE, type_, nr, size) def IOC(dir_: int, type_: str, nr: int, size: int) -> int: return (dir_ << IOC_DIRSHIFT) \ | (ord(type_) << IOC_TYPESHIFT) \ | (nr << IOC_NRSHIFT) \ | (size << IOC_SIZESHIFT) # Pythonification of linux/watchdog.h WATCHDOG_IOCTL_BASE = 'W' class watchdog_info(ctypes.Structure): _fields_ = [ ('options', ctypes.c_uint32), # Options the card/driver supports ('firmware_version', ctypes.c_uint32), # Firmware version of the card ('identity', ctypes.c_uint8 * 32), # Identity of the board ] struct_watchdog_info_size = ctypes.sizeof(watchdog_info) int_size = ctypes.sizeof(ctypes.c_int) WDIOC_GETSUPPORT = IOR(WATCHDOG_IOCTL_BASE, 0, struct_watchdog_info_size) WDIOC_GETSTATUS = IOR(WATCHDOG_IOCTL_BASE, 1, int_size) WDIOC_GETBOOTSTATUS = IOR(WATCHDOG_IOCTL_BASE, 2, int_size) WDIOC_GETTEMP = IOR(WATCHDOG_IOCTL_BASE, 3, int_size) WDIOC_SETOPTIONS = IOR(WATCHDOG_IOCTL_BASE, 4, int_size) WDIOC_KEEPALIVE = IOR(WATCHDOG_IOCTL_BASE, 5, int_size) WDIOC_SETTIMEOUT = IOWR(WATCHDOG_IOCTL_BASE, 6, int_size) WDIOC_GETTIMEOUT = IOR(WATCHDOG_IOCTL_BASE, 7, int_size) WDIOC_SETPRETIMEOUT = IOWR(WATCHDOG_IOCTL_BASE, 8, int_size) WDIOC_GETPRETIMEOUT = IOR(WATCHDOG_IOCTL_BASE, 9, int_size) WDIOC_GETTIMELEFT = IOR(WATCHDOG_IOCTL_BASE, 10, int_size) WDIOF_UNKNOWN = -1 # Unknown flag error WDIOS_UNKNOWN = -1 # Unknown status error WDIOF = { "OVERHEAT": 0x0001, # Reset due to CPU overheat "FANFAULT": 0x0002, # Fan failed "EXTERN1": 0x0004, # External relay 1 "EXTERN2": 0x0008, # External relay 2 "POWERUNDER": 0x0010, # Power bad/power fault "CARDRESET": 0x0020, # Card previously reset the CPU "POWEROVER": 0x0040, # Power over voltage "SETTIMEOUT": 0x0080, # Set 
timeout (in seconds) "MAGICCLOSE": 0x0100, # Supports magic close char "PRETIMEOUT": 0x0200, # Pretimeout (in seconds), get/set "ALARMONLY": 0x0400, # Watchdog triggers a management or other external alarm not a reboot "KEEPALIVEPING": 0x8000, # Keep alive ping reply } WDIOS = { "DISABLECARD": 0x0001, # Turn off the watchdog timer "ENABLECARD": 0x0002, # Turn on the watchdog timer "TEMPPANIC": 0x0004, # Kernel panic on temperature trip } # Implementation class WatchdogInfo(NamedTuple): """Watchdog descriptor from the kernel""" options: int version: int identity: str def __getattr__(self, name: str) -> bool: """Convenience has_XYZ attributes for checking WDIOF bits in options""" if name.startswith('has_') and name[4:] in WDIOF: return bool(self.options & WDIOF[name[4:]]) raise AttributeError("WatchdogInfo instance has no attribute '{0}'".format(name)) class LinuxWatchdogDevice(WatchdogBase): DEFAULT_DEVICE = '/dev/watchdog' def __init__(self, device: str) -> None: self.device = device self._support_cache = None self._fd = None @classmethod def from_config(cls, config: Dict[str, Any]) -> 'LinuxWatchdogDevice': device = config.get('device', cls.DEFAULT_DEVICE) return cls(device) @property def is_running(self) -> bool: return self._fd is not None @property def is_healthy(self) -> bool: return os.path.exists(self.device) and os.access(self.device, os.W_OK) def open(self) -> None: try: self._fd = os.open(self.device, os.O_WRONLY) except OSError as e: raise WatchdogError("Can't open watchdog device: {0}".format(e)) def close(self) -> None: if self._fd is not None: # self.is_running try: os.write(self._fd, b'V') os.close(self._fd) self._fd = None except OSError as e: raise WatchdogError("Error while closing {0}: {1}".format(self.describe(), e)) @property def can_be_disabled(self) -> bool: return self.get_support().has_MAGICCLOSE def _ioctl(self, func: int, arg: Any) -> None: """Runs the specified ioctl on the underlying fd. Raises WatchdogError if the device is closed. 
Raises OSError or IOError (Python 2) when the ioctl fails.""" if self._fd is None: raise WatchdogError("Watchdog device is closed") if os.name != 'nt': import fcntl fcntl.ioctl(self._fd, func, arg, True) def get_support(self) -> WatchdogInfo: if self._support_cache is None: info = watchdog_info() try: self._ioctl(WDIOC_GETSUPPORT, info) except (WatchdogError, OSError, IOError) as e: raise WatchdogError("Could not get information about watchdog device: {}".format(e)) self._support_cache = WatchdogInfo(info.options, info.firmware_version, bytearray(info.identity).decode(errors='ignore').rstrip('\x00')) return self._support_cache def describe(self) -> str: dev_str = " at {0}".format(self.device) if self.device != self.DEFAULT_DEVICE else "" ver_str = "" identity = "Linux watchdog device" if self._fd: try: _, version, identity = self.get_support() ver_str = " (firmware {0})".format(version) if version else "" except WatchdogError: pass return identity + ver_str + dev_str def keepalive(self) -> None: if self._fd is None: raise WatchdogError("Watchdog device is closed") try: os.write(self._fd, b'1') except OSError as e: raise WatchdogError("Could not send watchdog keepalive: {0}".format(e)) def has_set_timeout(self) -> bool: """Returns True if setting a timeout is supported.""" return self.get_support().has_SETTIMEOUT def set_timeout(self, timeout: int) -> None: timeout = int(timeout) if not 0 < timeout < 0xFFFF: raise WatchdogError("Invalid timeout {0}. 
Supported values are between 1 and 65535".format(timeout)) try: self._ioctl(WDIOC_SETTIMEOUT, ctypes.c_int(timeout)) except (WatchdogError, OSError, IOError) as e: raise WatchdogError("Could not set timeout on watchdog device: {}".format(e)) def get_timeout(self) -> int: timeout = ctypes.c_int() try: self._ioctl(WDIOC_GETTIMEOUT, timeout) except (WatchdogError, OSError, IOError) as e: raise WatchdogError("Could not get timeout on watchdog device: {}".format(e)) return timeout.value class TestingWatchdogDevice(LinuxWatchdogDevice): # pragma: no cover """Converts timeout ioctls to regular writes that can be intercepted from a named pipe.""" timeout = 60 def get_support(self) -> WatchdogInfo: return WatchdogInfo(WDIOF['MAGICCLOSE'] | WDIOF['SETTIMEOUT'], 0, "Watchdog test harness") def set_timeout(self, timeout: int) -> None: if self._fd is None: raise WatchdogError("Watchdog device is closed") buf = "Ctimeout={0}\n".format(timeout).encode('utf8') while len(buf): buf = buf[os.write(self._fd, buf):] self.timeout = timeout def get_timeout(self) -> int: return self.timeout patroni-3.2.2/patroni_raft_controller.py000077500000000000000000000001471455170150700205110ustar00rootroot00000000000000#!/usr/bin/env python from patroni.raft_controller import main if __name__ == '__main__': main() patroni-3.2.2/patronictl.py000077500000000000000000000001341455170150700157310ustar00rootroot00000000000000#!/usr/bin/env python from patroni.ctl import ctl if __name__ == '__main__': ctl(None) patroni-3.2.2/postgres0.yml000066400000000000000000000106531455170150700156550ustar00rootroot00000000000000scope: batman #namespace: /service/ name: postgresql0 restapi: listen: 127.0.0.1:8008 connect_address: 127.0.0.1:8008 # cafile: /etc/ssl/certs/ssl-cacert-snakeoil.pem # certfile: /etc/ssl/certs/ssl-cert-snakeoil.pem # keyfile: /etc/ssl/private/ssl-cert-snakeoil.key # authentication: # username: username # password: password #ctl: # insecure: false # Allow connections to Patroni REST API without 
verifying certificates # certfile: /etc/ssl/certs/ssl-cert-snakeoil.pem # keyfile: /etc/ssl/private/ssl-cert-snakeoil.key # cacert: /etc/ssl/certs/ssl-cacert-snakeoil.pem #citus: # database: citus # group: 0 # coordinator etcd: #Provide host to do the initial discovery of the cluster topology: host: 127.0.0.1:2379 #Or use "hosts" to provide multiple endpoints #Could be a comma separated string: #hosts: host1:port1,host2:port2 #or an actual yaml list: #hosts: #- host1:port1 #- host2:port2 #Once discovery is complete Patroni will use the list of advertised clientURLs #It is possible to change this behavior through by setting: #use_proxies: true #raft: # data_dir: . # self_addr: 127.0.0.1:2222 # partner_addrs: # - 127.0.0.1:2223 # - 127.0.0.1:2224 # The bootstrap configuration. Works only when the cluster is not yet initialized. # If the cluster is already initialized, all changes in the `bootstrap` section are ignored! bootstrap: # This section will be written into Etcd:///config after initializing new cluster # and all other cluster members will use it as a `global configuration`. # WARNING! If you want to change any of the parameters that were set up # via `bootstrap.dcs` section, please use `patronictl edit-config`! 
dcs: ttl: 30 loop_wait: 10 retry_timeout: 10 maximum_lag_on_failover: 1048576 # primary_start_timeout: 300 # synchronous_mode: false #standby_cluster: #host: 127.0.0.1 #port: 1111 #primary_slot_name: patroni postgresql: use_pg_rewind: true pg_hba: # For kerberos gss based connectivity (discard @.*$) #- host replication replicator 127.0.0.1/32 gss include_realm=0 #- host all all 0.0.0.0/0 gss include_realm=0 - host replication replicator 127.0.0.1/32 md5 - host all all 0.0.0.0/0 md5 # - hostssl all all 0.0.0.0/0 md5 # use_slots: true parameters: # wal_level: hot_standby # hot_standby: "on" # max_connections: 100 # max_worker_processes: 8 # wal_keep_segments: 8 # max_wal_senders: 10 # max_replication_slots: 10 # max_prepared_transactions: 0 # max_locks_per_transaction: 64 # wal_log_hints: "on" # track_commit_timestamp: "off" # archive_mode: "on" # archive_timeout: 1800s # archive_command: mkdir -p ../wal_archive && test ! -f ../wal_archive/%f && cp %p ../wal_archive/%f # recovery_conf: # restore_command: cp ../wal_archive/%f %p # some desired options for 'initdb' initdb: # Note: It needs to be a list (some options need values, others are switches) - encoding: UTF8 - data-checksums # Additional script to be launched after initial cluster creation (will be passed the connection URL as parameter) # post_init: /usr/local/bin/setup_cluster.sh postgresql: listen: 127.0.0.1:5432 connect_address: 127.0.0.1:5432 # proxy_address: 127.0.0.1:5433 # The address of connection pool (e.g., pgbouncer) running next to Patroni/Postgres. Only for service discovery. 
data_dir: data/postgresql0 # bin_dir: # config_dir: pgpass: /tmp/pgpass0 authentication: replication: username: replicator password: rep-pass superuser: username: postgres password: zalando rewind: # Has no effect on postgres 10 and lower username: rewind_user password: rewind_password # Server side kerberos spn # krbsrvname: postgres parameters: # Fully qualified kerberos ticket file for the running user # same as KRB5CCNAME used by the GSS # krb_server_keyfile: /var/spool/keytabs/postgres unix_socket_directories: '..' # parent directory of data_dir # Additional fencing script executed after acquiring the leader lock but before promoting the replica #pre_promote: /path/to/pre_promote.sh #watchdog: # mode: automatic # Allowed values: off, automatic, required # device: /dev/watchdog # safety_margin: 5 tags: # failover_priority: 1 noloadbalance: false clonefrom: false nosync: false patroni-3.2.2/postgres1.yml000066400000000000000000000104411455170150700156510ustar00rootroot00000000000000scope: batman #namespace: /service/ name: postgresql1 restapi: listen: 127.0.0.1:8009 connect_address: 127.0.0.1:8009 # cafile: /etc/ssl/certs/ssl-cacert-snakeoil.pem # certfile: /etc/ssl/certs/ssl-cert-snakeoil.pem # keyfile: /etc/ssl/private/ssl-cert-snakeoil.key # authentication: # username: username # password: password #ctl: # insecure: false # Allow connections to Patroni REST API without verifying certificates # certfile: /etc/ssl/certs/ssl-cert-snakeoil.pem # keyfile: /etc/ssl/private/ssl-cert-snakeoil.key # cacert: /etc/ssl/certs/ssl-cacert-snakeoil.pem #citus: # database: citus # group: 1 # worker etcd: #Provide host to do the initial discovery of the cluster topology: host: 127.0.0.1:2379 #Or use "hosts" to provide multiple endpoints #Could be a comma separated string: #hosts: host1:port1,host2:port2 #or an actual yaml list: #hosts: #- host1:port1 #- host2:port2 #Once discovery is complete Patroni will use the list of advertised clientURLs #It is possible to change this 
behavior through by setting: #use_proxies: true #raft: # data_dir: . # self_addr: 127.0.0.1:2223 # partner_addrs: # - 127.0.0.1:2222 # - 127.0.0.1:2224 # The bootstrap configuration. Works only when the cluster is not yet initialized. # If the cluster is already initialized, all changes in the `bootstrap` section are ignored! bootstrap: # This section will be written into Etcd:///config after initializing new cluster # and all other cluster members will use it as a `global configuration`. # WARNING! If you want to change any of the parameters that were set up # via `bootstrap.dcs` section, please use `patronictl edit-config`! dcs: ttl: 30 loop_wait: 10 retry_timeout: 10 maximum_lag_on_failover: 1048576 postgresql: use_pg_rewind: true pg_hba: # For kerberos gss based connectivity (discard @.*$) #- host replication replicator 127.0.0.1/32 gss include_realm=0 #- host all all 0.0.0.0/0 gss include_realm=0 - host replication replicator 127.0.0.1/32 md5 - host all all 0.0.0.0/0 md5 # - hostssl all all 0.0.0.0/0 md5 # use_slots: true parameters: # wal_level: hot_standby # hot_standby: "on" # max_connections: 100 # max_worker_processes: 8 # wal_keep_segments: 8 # max_wal_senders: 10 # max_replication_slots: 10 # max_prepared_transactions: 0 # max_locks_per_transaction: 64 # wal_log_hints: "on" # track_commit_timestamp: "off" # archive_mode: "on" # archive_timeout: 1800s # archive_command: mkdir -p ../wal_archive && test ! 
-f ../wal_archive/%f && cp %p ../wal_archive/%f # recovery_conf: # restore_command: cp ../wal_archive/%f %p # some desired options for 'initdb' initdb: # Note: It needs to be a list (some options need values, others are switches) - encoding: UTF8 - data-checksums # Additional script to be launched after initial cluster creation (will be passed the connection URL as parameter) # post_init: /usr/local/bin/setup_cluster.sh postgresql: listen: 127.0.0.1:5433 connect_address: 127.0.0.1:5433 # proxy_address: 127.0.0.1:5434 # The address of connection pool (e.g., pgbouncer) running next to Patroni/Postgres. Only for service discovery. data_dir: data/postgresql1 # bin_dir: # config_dir: pgpass: /tmp/pgpass1 authentication: replication: username: replicator password: rep-pass superuser: username: postgres password: zalando rewind: # Has no effect on postgres 10 and lower username: rewind_user password: rewind_password # Server side kerberos spn # krbsrvname: postgres parameters: # Fully qualified kerberos ticket file for the running user # same as KRB5CCNAME used by the GSS # krb_server_keyfile: /var/spool/keytabs/postgres unix_socket_directories: '..' 
# parent directory of data_dir basebackup: - verbose - max-rate: 100M # - waldir: /pg-wal-mount/external-waldir # only needed in case pg_wal is symlinked outside of data_dir # Additional fencing script executed after acquiring the leader lock but before promoting the replica #pre_promote: /path/to/pre_promote.sh tags: # failover_priority: 1 noloadbalance: false clonefrom: false patroni-3.2.2/postgres2.yml000066400000000000000000000075501455170150700156610ustar00rootroot00000000000000scope: batman #namespace: /service/ name: postgresql2 restapi: listen: 127.0.0.1:8010 connect_address: 127.0.0.1:8010 # cafile: /etc/ssl/certs/ssl-cacert-snakeoil.pem # certfile: /etc/ssl/certs/ssl-cert-snakeoil.pem # keyfile: /etc/ssl/private/ssl-cert-snakeoil.key authentication: username: username password: password #ctl: # insecure: false # Allow connections to Patroni REST API without verifying certificates # certfile: /etc/ssl/certs/ssl-cert-snakeoil.pem # keyfile: /etc/ssl/private/ssl-cert-snakeoil.key # cacert: /etc/ssl/certs/ssl-cacert-snakeoil.pem #citus: # database: citus # group: 1 # worker etcd: #Provide host to do the initial discovery of the cluster topology: host: 127.0.0.1:2379 #Or use "hosts" to provide multiple endpoints #Could be a comma separated string: #hosts: host1:port1,host2:port2 #or an actual yaml list: #hosts: #- host1:port1 #- host2:port2 #Once discovery is complete Patroni will use the list of advertised clientURLs #It is possible to change this behavior through by setting: #use_proxies: true #raft: # data_dir: . # self_addr: 127.0.0.1:2224 # partner_addrs: # - 127.0.0.1:2222 # - 127.0.0.1:2223 # The bootstrap configuration. Works only when the cluster is not yet initialized. # If the cluster is already initialized, all changes in the `bootstrap` section are ignored! bootstrap: # This section will be written into Etcd:///config after initializing new cluster # and all other cluster members will use it as a `global configuration`. # WARNING! 
If you want to change any of the parameters that were set up # via `bootstrap.dcs` section, please use `patronictl edit-config`! dcs: ttl: 30 loop_wait: 10 retry_timeout: 10 maximum_lag_on_failover: 1048576 postgresql: use_pg_rewind: true pg_hba: # For kerberos gss based connectivity (discard @.*$) #- host replication replicator 127.0.0.1/32 gss include_realm=0 #- host all all 0.0.0.0/0 gss include_realm=0 - host replication replicator 127.0.0.1/32 md5 - host all all 0.0.0.0/0 md5 # - hostssl all all 0.0.0.0/0 md5 # use_slots: true parameters: # wal_level: hot_standby # hot_standby: "on" # max_connections: 100 # max_worker_processes: 8 # wal_keep_segments: 8 # max_wal_senders: 10 # max_replication_slots: 10 # max_prepared_transactions: 0 # max_locks_per_transaction: 64 # wal_log_hints: "on" # track_commit_timestamp: "off" # archive_mode: "on" # archive_timeout: 1800s # archive_command: mkdir -p ../wal_archive && test ! -f ../wal_archive/%f && cp %p ../wal_archive/%f # recovery_conf: # restore_command: cp ../wal_archive/%f %p # some desired options for 'initdb' initdb: # Note: It needs to be a list (some options need values, others are switches) - encoding: UTF8 - data-checksums postgresql: listen: 127.0.0.1:5434 connect_address: 127.0.0.1:5434 # proxy_address: 127.0.0.1:5435 # The address of connection pool (e.g., pgbouncer) running next to Patroni/Postgres. Only for service discovery. data_dir: data/postgresql2 # bin_dir: # config_dir: pgpass: /tmp/pgpass2 authentication: replication: username: replicator password: rep-pass superuser: username: postgres password: zalando rewind: # Has no effect on postgres 10 and lower username: rewind_user password: rewind_password # Server side kerberos spn # krbsrvname: postgres parameters: # Fully qualified kerberos ticket file for the running user # same as KRB5CCNAME used by the GSS # krb_server_keyfile: /var/spool/keytabs/postgres unix_socket_directories: '..' 
# parent directory of data_dir tags: # failover_priority: 1 noloadbalance: false clonefrom: false # replicatefrom: postgresql1 patroni-3.2.2/pyrightconfig.json000066400000000000000000000005111455170150700167430ustar00rootroot00000000000000{ "include": [ "patroni" ], "exclude": [ "**/__pycache__" ], "ignore": [ ], "defineConstant": { "DEBUG": true }, "stubPath": "typings/", "reportMissingImports": true, "reportMissingTypeStubs": false, "pythonVersion": "3.11", "pythonPlatform": "All", "typeCheckingMode": "strict" } patroni-3.2.2/release.sh000077500000000000000000000015221455170150700151560ustar00rootroot00000000000000#!/bin/bash # Release process: # 1. Open a PR that updates release notes, Patroni version and pyright version in the tests workflow. # 2. Resolve possible typing issues. # 3. Merge the PR. # 4. Run release.sh # 5. After the new tag is pushed, the .github/workflows/release.yaml will run tests and upload the new package to test.pypi.org # 6. Once the release is created, the .github/workflows/release.yaml will run tests and upload the new package to pypi.org ## Bail out on any non-zero exitcode from the called processes set -xe if python3 --version &> /dev/null; then alias python=python3 shopt -s expand_aliases fi python --version git --version version=$(python -c 'from patroni.version import __version__; print(__version__)') python setup.py clean python setup.py test python setup.py flake8 git tag "v$version" git push --tags patroni-3.2.2/requirements.dev.txt000066400000000000000000000001201455170150700172310ustar00rootroot00000000000000psycopg2-binary behave coverage flake8>=3.0.0 mock pytest-cov pytest setuptools patroni-3.2.2/requirements.docs.txt000066400000000000000000000001141455170150700174060ustar00rootroot00000000000000sphinx>=4 sphinx_rtd_theme>1 sphinxcontrib-apidoc sphinx-github-style<1.0.3 patroni-3.2.2/requirements.txt000066400000000000000000000003101455170150700164550ustar00rootroot00000000000000urllib3>=1.19.1,!=1.21 boto3 PyYAML 
kazoo>=1.3.1 python-etcd>=0.4.3,<0.5 python-consul>=0.7.1 click>=4.1 prettytable>=0.7 python-dateutil pysyncobj>=0.3.8 cryptography>=1.4 psutil>=2.0.0 ydiff>=1.2.0 patroni-3.2.2/setup.py000066400000000000000000000142731455170150700147200ustar00rootroot00000000000000#!/usr/bin/env python """ Setup file for patroni """ import inspect import logging import os import sys from setuptools import Command, find_packages, setup __location__ = os.path.join(os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe()))) NAME = 'patroni' MAIN_PACKAGE = NAME DESCRIPTION = 'PostgreSQL High-Available orchestrator and CLI' LICENSE = 'The MIT License' URL = 'https://github.com/zalando/patroni' AUTHOR = 'Alexander Kukushkin, Polina Bungina' AUTHOR_EMAIL = 'akukushkin@microsoft.com, polina.bungina@zalando.de' KEYWORDS = 'etcd governor patroni postgresql postgres ha haproxy confd' +\ ' zookeeper exhibitor consul streaming replication kubernetes k8s' EXTRAS_REQUIRE = {'aws': ['boto3'], 'etcd': ['python-etcd'], 'etcd3': ['python-etcd'], 'consul': ['python-consul'], 'exhibitor': ['kazoo'], 'zookeeper': ['kazoo'], 'kubernetes': [], 'raft': ['pysyncobj', 'cryptography']} # Add here all kinds of additional classifiers as defined under # https://pypi.python.org/pypi?%3Aaction=list_classifiers CLASSIFIERS = [ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: MIT License', 'Operating System :: MacOS', 'Operating System :: POSIX :: Linux', 'Operating System :: POSIX :: BSD :: FreeBSD', 'Operating System :: Microsoft :: Windows', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming 
Language :: Python :: Implementation :: CPython', ] CONSOLE_SCRIPTS = ['patroni = patroni.__main__:main', 'patronictl = patroni.ctl:ctl', 'patroni_raft_controller = patroni.raft_controller:main', "patroni_wale_restore = patroni.scripts.wale_restore:main", "patroni_aws = patroni.scripts.aws:main"] class _Command(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass class Flake8(_Command): def package_files(self): seen_package_directories = () directories = self.distribution.package_dir or {} empty_directory_exists = "" in directories packages = self.distribution.packages or [] for package in packages: if package in directories: package_directory = directories[package] elif empty_directory_exists: package_directory = os.path.join(directories[""], package) else: package_directory = package if not package_directory.startswith(seen_package_directories): seen_package_directories += (package_directory + ".",) yield package_directory def targets(self): return [package for package in self.package_files()] + ['tests', 'features', 'setup.py'] def run(self): from flake8.main.cli import main logging.getLogger().setLevel(logging.ERROR) raise SystemExit(main(self.targets())) class PyTest(_Command): def run(self): try: import pytest except Exception: raise RuntimeError('py.test is not installed, run: pip install pytest') logging.getLogger().setLevel(logging.WARNING) args = ['--verbose', 'tests', '--doctest-modules', MAIN_PACKAGE] +\ ['-s' if logging.getLogger().getEffectiveLevel() < logging.WARNING else '--capture=fd'] +\ ['--cov', MAIN_PACKAGE, '--cov-report', 'term-missing', '--cov-report', 'xml'] errno = pytest.main(args=args) sys.exit(errno) def read(fname): with open(os.path.join(__location__, fname), encoding='utf-8') as fd: return fd.read() def get_versions(): old_modules = sys.modules.copy() try: from patroni import MIN_PSYCOPG2, MIN_PSYCOPG3 from patroni.version import __version__ return __version__, MIN_PSYCOPG2, MIN_PSYCOPG3 
finally: sys.modules.clear() sys.modules.update(old_modules) def main(): logging.basicConfig(format='%(message)s', level=os.getenv('LOGLEVEL', logging.WARNING)) install_requires = [] for r in read('requirements.txt').split('\n'): r = r.strip() if r == '': continue extra = False for e, deps in EXTRAS_REQUIRE.items(): for i, v in enumerate(deps): if r.startswith(v): deps[i] = r EXTRAS_REQUIRE[e] = deps extra = True if not extra: install_requires.append(r) # Just for convenience, if someone wants to install dependencies for all extras EXTRAS_REQUIRE['all'] = list({e for extras in EXTRAS_REQUIRE.values() for e in extras}) patroni_version, min_psycopg2, min_psycopg3 = get_versions() # Make it possible to specify psycopg dependency as extra for name, version in {'psycopg[binary]': min_psycopg3, 'psycopg2': min_psycopg2, 'psycopg2-binary': None}.items(): EXTRAS_REQUIRE[name] = [name + ('>=' + '.'.join(map(str, version)) if version else '')] EXTRAS_REQUIRE['psycopg3'] = EXTRAS_REQUIRE.pop('psycopg[binary]') setup( name=NAME, version=patroni_version, url=URL, author=AUTHOR, author_email=AUTHOR_EMAIL, description=DESCRIPTION, license=LICENSE, keywords=KEYWORDS, long_description=read('README.rst'), classifiers=CLASSIFIERS, packages=find_packages(exclude=['tests', 'tests.*']), package_data={MAIN_PACKAGE: [ "postgresql/available_parameters/*.yml", "postgresql/available_parameters/*.yaml", ]}, install_requires=install_requires, extras_require=EXTRAS_REQUIRE, cmdclass={'test': PyTest, 'flake8': Flake8}, entry_points={'console_scripts': CONSOLE_SCRIPTS}, ) if __name__ == '__main__': main() patroni-3.2.2/tests/000077500000000000000000000000001455170150700143415ustar00rootroot00000000000000patroni-3.2.2/tests/__init__.py000066400000000000000000000316051455170150700164570ustar00rootroot00000000000000import datetime import os import shutil import unittest from mock import Mock, PropertyMock, patch import urllib3 import patroni.psycopg as psycopg from patroni.dcs import Leader, Member 
from patroni.postgresql import Postgresql from patroni.postgresql.config import ConfigHandler from patroni.utils import RetryFailedError, tzutc class SleepException(Exception): pass mock_available_gucs = PropertyMock(return_value={ 'cluster_name', 'constraint_exclusion', 'force_parallel_mode', 'hot_standby', 'listen_addresses', 'max_connections', 'max_locks_per_transaction', 'max_prepared_transactions', 'max_replication_slots', 'max_stack_depth', 'max_wal_senders', 'max_worker_processes', 'port', 'search_path', 'shared_preload_libraries', 'stats_temp_directory', 'synchronous_standby_names', 'track_commit_timestamp', 'unix_socket_directories', 'vacuum_cost_delay', 'vacuum_cost_limit', 'wal_keep_size', 'wal_level', 'wal_log_hints', 'zero_damaged_pages', 'autovacuum', 'wal_segment_size', 'wal_block_size', 'shared_buffers', 'wal_buffers', }) GET_PG_SETTINGS_RESULT = [ ('wal_segment_size', '2048', '8kB', 'integer', 'internal'), ('wal_block_size', '8192', None, 'integer', 'internal'), ('shared_buffers', '16384', '8kB', 'integer', 'postmaster'), ('wal_buffers', '-1', '8kB', 'integer', 'postmaster'), ('max_connections', '100', None, 'integer', 'postmaster'), ('max_prepared_transactions', '200', None, 'integer', 'postmaster'), ('max_worker_processes', '8', None, 'integer', 'postmaster'), ('max_locks_per_transaction', '64', None, 'integer', 'postmaster'), ('max_wal_senders', '5', None, 'integer', 'postmaster'), ('search_path', 'public', None, 'string', 'user'), ('port', '5432', None, 'integer', 'postmaster'), ('listen_addresses', '127.0.0.2, 127.0.0.3', None, 'string', 'postmaster'), ('autovacuum', 'on', None, 'bool', 'sighup'), ('unix_socket_directories', '/tmp', None, 'string', 'postmaster'), ('shared_preload_libraries', 'citus', None, 'string', 'postmaster'), ('wal_keep_size', '128', 'MB', 'integer', 'sighup'), ('cluster_name', 'batman', None, 'string', 'postmaster'), ('vacuum_cost_delay', '200', 'ms', 'real', 'user'), ('vacuum_cost_limit', '-1', None, 'integer', 'user'), 
('max_stack_depth', '2048', 'kB', 'integer', 'superuser'), ('constraint_exclusion', '', None, 'enum', 'user'), ('force_parallel_mode', '1', None, 'enum', 'user'), ('zero_damaged_pages', 'off', None, 'bool', 'superuser'), ('stats_temp_directory', '/tmp', None, 'string', 'sighup'), ('track_commit_timestamp', 'off', None, 'bool', 'postmaster'), ('wal_log_hints', 'on', None, 'bool', 'postmaster'), ('hot_standby', 'on', None, 'bool', 'postmaster'), ('max_replication_slots', '5', None, 'integer', 'postmaster'), ('wal_level', 'logical', None, 'enum', 'postmaster'), ] class MockResponse(object): def __init__(self, status_code=200): self.status_code = status_code self.headers = {'content-type': 'json'} self.content = '{}' self.reason = 'Not Found' @property def data(self): return self.content.encode('utf-8') @property def status(self): return self.status_code @staticmethod def getheader(*args): return '' def requests_get(url, method='GET', endpoint=None, data='', **kwargs): members = '[{"id":14855829450254237642,"peerURLs":["http://localhost:2380","http://localhost:7001"],' +\ '"name":"default","clientURLs":["http://localhost:2379","http://localhost:4001"]}]' response = MockResponse() if endpoint == 'failsafe': response.content = 'Accepted' elif url.startswith('http://local'): raise urllib3.exceptions.HTTPError() elif ':8011/patroni' in url: response.content = '{"role": "replica", "wal": {"received_location": 0}, "tags": {}}' elif url.endswith('/members'): response.content = '[{}]' if url.startswith('http://error') else members elif url.startswith('http://exhibitor'): response.content = '{"servers":["127.0.0.1","127.0.0.2","127.0.0.3"],"port":2181}' elif url.endswith(':8011/reinitialize'): if ' false}' in data: response.status_code = 503 response.content = 'restarting after failure already in progress' else: response.status_code = 404 return response class MockPostmaster(object): def __init__(self, pid=1): self.is_running = Mock(return_value=self) 
self.wait_for_user_backends_to_close = Mock() self.signal_stop = Mock(return_value=None) self.wait = Mock() self.signal_kill = Mock(return_value=False) class MockCursor(object): def __init__(self, connection): self.connection = connection self.closed = False self.rowcount = 0 self.results = [] self.description = [Mock()] def execute(self, sql, *params): if isinstance(sql, bytes): sql = sql.decode('utf-8') if sql.startswith('blabla'): raise psycopg.ProgrammingError() elif sql == 'CHECKPOINT' or sql.startswith('SELECT pg_catalog.pg_create_'): raise psycopg.OperationalError() elif sql.startswith('RetryFailedError'): raise RetryFailedError('retry') elif sql.startswith('SELECT slot_name, catalog_xmin'): self.results = [('postgresql0', 100), ('ls', 100)] elif sql.startswith('SELECT slot_name, slot_type, datname, plugin, catalog_xmin'): self.results = [('ls', 'logical', 'a', 'b', 100, 500, b'123456')] elif sql.startswith('SELECT slot_name'): self.results = [('blabla', 'physical', 12345), ('foobar', 'physical', 12345), ('ls', 'logical', 499, 'b', 'a', 5, 100, 500)] elif sql.startswith('WITH slots AS (SELECT slot_name, active'): self.results = [(False, True)] if self.rowcount == 1 else [] elif sql.startswith('SELECT CASE WHEN pg_catalog.pg_is_in_recovery()'): self.results = [(1, 2, 1, 0, False, 1, 1, None, None, 'streaming', '', [{"slot_name": "ls", "confirmed_flush_lsn": 12345, "restart_lsn": 12344}], 'on', 'n1', None)] elif sql.startswith('SELECT pg_catalog.pg_is_in_recovery()'): self.results = [(False, 2)] elif sql.startswith('SELECT pg_catalog.pg_postmaster_start_time'): self.results = [(datetime.datetime.now(tzutc),)] elif sql.startswith('SELECT name, pg_catalog.current_setting(name) FROM pg_catalog.pg_settings'): self.results = [('data_directory', 'data'), ('hba_file', os.path.join('data', 'pg_hba.conf')), ('ident_file', os.path.join('data', 'pg_ident.conf')), ('max_connections', 42), ('max_locks_per_transaction', 73), ('max_prepared_transactions', 0), 
('max_replication_slots', 21), ('max_wal_senders', 37), ('track_commit_timestamp', 'off'), ('wal_level', 'replica'), ('listen_addresses', '6.6.6.6'), ('port', 1984), ('archive_command', 'my archive command'), ('cluster_name', 'my_cluster')] elif sql.startswith('SELECT name, setting'): self.results = GET_PG_SETTINGS_RESULT elif sql.startswith('SELECT COUNT(*) FROM pg_catalog.pg_settings'): self.results = [(0,)] elif sql.startswith('IDENTIFY_SYSTEM'): self.results = [('1', 3, '0/402EEC0', '')] elif sql.startswith('TIMELINE_HISTORY '): self.results = [('', b'x\t0/40159C0\tno recovery target specified\n\n' b'1\t0/40159C0\tno recovery target specified\n\n' b'2\t0/402DD98\tno recovery target specified\n\n' b'3\t0/403DD98\tno recovery target specified\n')] elif sql.startswith('SELECT pg_catalog.citus_add_node'): self.results = [(2,)] elif sql.startswith('SELECT nodeid, groupid'): self.results = [(1, 0, 'host1', 5432, 'primary'), (2, 1, 'host2', 5432, 'primary')] else: self.results = [(None, None, None, None, None, None, None, None, None, None)] self.rowcount = len(self.results) def fetchone(self): return self.results[0] def fetchall(self): return self.results def __iter__(self): for i in self.results: yield i def __enter__(self): return self def __exit__(self, *args): pass class MockConnectionInfo(object): def parameter_status(self, param_name): if param_name == 'is_superuser': return 'on' return '0' class MockConnect(object): server_version = 99999 autocommit = False closed = 0 info = MockConnectionInfo() def cursor(self): return MockCursor(self) def __enter__(self): return self def __exit__(self, *args): pass @staticmethod def close(): pass def psycopg_connect(*args, **kwargs): return MockConnect() class PostgresInit(unittest.TestCase): _PARAMETERS = {'wal_level': 'hot_standby', 'max_replication_slots': 5, 'f.oo': 'bar', 'search_path': 'public', 'hot_standby': 'on', 'max_wal_senders': 5, 'wal_keep_segments': 8, 'wal_log_hints': 'on', 'max_locks_per_transaction': 64, 
'max_worker_processes': 8, 'max_connections': 100, 'max_prepared_transactions': 200, 'track_commit_timestamp': 'off', 'unix_socket_directories': '/tmp', 'trigger_file': 'bla', 'stats_temp_directory': '/tmp', 'zero_damaged_pages': 'off', 'force_parallel_mode': '1', 'constraint_exclusion': '', 'max_stack_depth': 2048, 'vacuum_cost_limit': -1, 'vacuum_cost_delay': 200} @patch('patroni.psycopg._connect', psycopg_connect) @patch('patroni.postgresql.CallbackExecutor', Mock()) @patch.object(ConfigHandler, 'write_postgresql_conf', Mock()) @patch.object(ConfigHandler, 'replace_pg_hba', Mock()) @patch.object(ConfigHandler, 'replace_pg_ident', Mock()) @patch.object(Postgresql, 'get_postgres_role_from_data_directory', Mock(return_value='primary')) def setUp(self): data_dir = os.path.join('data', 'test0') self.p = Postgresql({'name': 'postgresql0', 'scope': 'batman', 'data_dir': data_dir, 'config_dir': data_dir, 'retry_timeout': 10, 'krbsrvname': 'postgres', 'pgpass': os.path.join(data_dir, 'pgpass0'), 'listen': '127.0.0.2, 127.0.0.3:5432', 'connect_address': '127.0.0.2:5432', 'proxy_address': '127.0.0.2:5433', 'authentication': {'superuser': {'username': 'foo', 'password': 'test'}, 'replication': {'username': '', 'password': 'rep-pass'}, 'rewind': {'username': 'rewind', 'password': 'test'}}, 'remove_data_directory_on_rewind_failure': True, 'use_pg_rewind': True, 'pg_ctl_timeout': 'bla', 'use_unix_socket': True, 'parameters': self._PARAMETERS, 'recovery_conf': {'foo': 'bar'}, 'pg_hba': ['host all all 0.0.0.0/0 md5'], 'pg_ident': ['krb realm postgres'], 'callbacks': {'on_start': 'true', 'on_stop': 'true', 'on_reload': 'true', 'on_restart': 'true', 'on_role_change': 'true'}, 'citus': {'group': 0, 'database': 'citus'}}) class BaseTestPostgresql(PostgresInit): @patch('time.sleep', Mock()) def setUp(self): super(BaseTestPostgresql, self).setUp() if not os.path.exists(self.p.data_dir): os.makedirs(self.p.data_dir) self.leadermem = Member(0, 'leader', 28, {'xlog_location': 100, 
'state': 'running', 'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5435/postgres'}) self.leader = Leader(-1, 28, self.leadermem) self.other = Member(0, 'test-1', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5433/postgres', 'state': 'running', 'tags': {'replicatefrom': 'leader'}}) self.me = Member(0, 'test0', 28, { 'state': 'running', 'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5434/postgres'}) def tearDown(self): if os.path.exists(self.p.data_dir): shutil.rmtree(self.p.data_dir) patroni-3.2.2/tests/test_api.py000066400000000000000000001101271455170150700165250ustar00rootroot00000000000000import datetime import json import unittest import socket from http.server import HTTPServer from io import BytesIO as IO from mock import Mock, PropertyMock, patch from socketserver import ThreadingMixIn from patroni.api import RestApiHandler, RestApiServer from patroni.config import GlobalConfig from patroni.dcs import ClusterConfig, Member from patroni.exceptions import PostgresConnectionException from patroni.ha import _MemberStatus from patroni.psycopg import OperationalError from patroni.utils import RetryFailedError, tzutc from . 
import MockConnect, psycopg_connect from .test_ha import get_cluster_initialized_without_leader future_restart_time = datetime.datetime.now(tzutc) + datetime.timedelta(days=5) postmaster_start_time = datetime.datetime.now(tzutc) class MockConnection: @staticmethod def get(*args): return psycopg_connect() @staticmethod def query(sql, *params): return [(postmaster_start_time, 0, '', 0, '', False, postmaster_start_time, 'streaming', None, '[{"application_name":"walreceiver","client_addr":"1.2.3.4",' + '"state":"streaming","sync_state":"async","sync_priority":0}]')] class MockConnectionPool: @staticmethod def get(*args): return MockConnection() class MockPostgresql: connection_pool = MockConnectionPool() name = 'test' state = 'running' role = 'primary' server_version = 90625 major_version = 90600 sysid = 'dummysysid' scope = 'dummy' pending_restart = True wal_name = 'wal' lsn_name = 'lsn' wal_flush = '_flush' POSTMASTER_START_TIME = 'pg_catalog.pg_postmaster_start_time()' TL_LSN = 'CASE WHEN pg_catalog.pg_is_in_recovery()' citus_handler = Mock() @staticmethod def postmaster_start_time(): return postmaster_start_time @staticmethod def replica_cached_timeline(_): return 2 @staticmethod def is_running(): return True @staticmethod def replication_state_from_parameters(*args): return 'streaming' class MockWatchdog(object): is_healthy = False class MockHa(object): state_handler = MockPostgresql() watchdog = MockWatchdog() @staticmethod def update_failsafe(*args): return 'foo' @staticmethod def failsafe_is_active(*args): return True @staticmethod def is_leader(): return False @staticmethod def reinitialize(_): return 'reinitialize' @staticmethod def restart(*args, **kwargs): return (True, '') @staticmethod def restart_scheduled(): return False @staticmethod def delete_future_restart(): return True @staticmethod def fetch_nodes_statuses(members): return [_MemberStatus(None, True, None, 0, {})] @staticmethod def schedule_future_restart(data): return True @staticmethod def 
is_lagging(wal): return False @staticmethod def get_effective_tags(): return {'nosync': True} @staticmethod def wakeup(): pass @staticmethod def is_paused(): return True class MockLogger(object): NORMAL_LOG_QUEUE_SIZE = 2 queue_size = 3 records_lost = 1 class MockConfig(object): def get_global_config(self, _): return GlobalConfig({}) class MockPatroni(object): ha = MockHa() config = MockConfig() postgresql = ha.state_handler dcs = Mock() logger = MockLogger() tags = {"key1": True, "key2": False, "key3": 1, "key4": 1.4, "key5": "RandomTag"} version = '0.00' noloadbalance = PropertyMock(return_value=False) scheduled_restart = {'schedule': future_restart_time, 'postmaster_start_time': postgresql.postmaster_start_time()} @staticmethod def sighup_handler(): pass @staticmethod def api_sigterm(): pass class MockRequest(object): def __init__(self, request): self.request = request.encode('utf-8') def makefile(self, *args, **kwargs): return IO(self.request) def sendall(self, *args, **kwargs): pass class MockRestApiServer(RestApiServer): def __init__(self, Handler, request, config=None): self.socket = 0 self.serve_forever = Mock() MockRestApiServer._BaseServer__is_shut_down = Mock() MockRestApiServer._BaseServer__shutdown_request = True config = config or {'listen': '127.0.0.1:8008', 'auth': 'test:test', 'certfile': 'dumb', 'verify_client': 'a', 'http_extra_headers': {'foo': 'bar'}, 'https_extra_headers': {'foo': 'sbar'}} super(MockRestApiServer, self).__init__(MockPatroni(), config) Handler(MockRequest(request), ('0.0.0.0', 8080), self) @patch('ssl.SSLContext.load_cert_chain', Mock()) @patch('ssl.SSLContext.wrap_socket', Mock(return_value=0)) @patch.object(HTTPServer, '__init__', Mock()) class TestRestApiHandler(unittest.TestCase): _authorization = '\nAuthorization: Basic dGVzdDp0ZXN0' def test_do_GET(self): MockPatroni.dcs.cluster.last_lsn = 20 MockPatroni.dcs.cluster.sync.members = [MockPostgresql.name] with patch.object(GlobalConfig, 'is_synchronous_mode', 
PropertyMock(return_value=True)): MockRestApiServer(RestApiHandler, 'GET /replica') MockRestApiServer(RestApiHandler, 'GET /replica?lag=1M') MockRestApiServer(RestApiHandler, 'GET /replica?lag=10MB') MockRestApiServer(RestApiHandler, 'GET /replica?lag=10485760') MockRestApiServer(RestApiHandler, 'GET /read-only') with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={})): MockRestApiServer(RestApiHandler, 'GET /replica') with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={'role': 'primary'})): MockRestApiServer(RestApiHandler, 'GET /replica') with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={'state': 'running'})): MockRestApiServer(RestApiHandler, 'GET /health') MockRestApiServer(RestApiHandler, 'GET /leader') with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={'role': 'replica', 'sync_standby': True})): MockRestApiServer(RestApiHandler, 'GET /synchronous') MockRestApiServer(RestApiHandler, 'GET /read-only-sync') with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={'role': 'replica'})): MockPatroni.dcs.cluster.sync.members = [] MockRestApiServer(RestApiHandler, 'GET /asynchronous') with patch.object(MockHa, 'is_leader', Mock(return_value=True)): MockRestApiServer(RestApiHandler, 'GET /replica') MockRestApiServer(RestApiHandler, 'GET /read-only-sync') with patch.object(GlobalConfig, 'is_standby_cluster', Mock(return_value=True)): MockRestApiServer(RestApiHandler, 'GET /standby_leader') MockPatroni.dcs.cluster = None with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={'role': 'primary'})): MockRestApiServer(RestApiHandler, 'GET /primary') with patch.object(MockHa, 'restart_scheduled', Mock(return_value=True)): MockRestApiServer(RestApiHandler, 'GET /primary') self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /primary')) with patch.object(RestApiServer, 'query', Mock(return_value=[('', 1, '', '', '', '', 
False, None, None, '')])): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /patroni')) with patch.object(GlobalConfig, 'is_standby_cluster', Mock(return_value=True)), \ patch.object(GlobalConfig, 'is_paused', Mock(return_value=True)): MockRestApiServer(RestApiHandler, 'GET /standby_leader') # test tags # MockRestApiServer(RestApiHandler, 'GET /primary?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /primary?lag=1M&' 'tag_key1=true&tag_key2=False&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /primary?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1.0&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /primary?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag&tag_key6=RandomTag2') # with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={'role': 'primary'})): MockRestApiServer(RestApiHandler, 'GET /primary?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /primary?lag=1M&' 'tag_key1=true&tag_key2=False&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /primary?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1.0&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /primary?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag&tag_key6=RandomTag2') # with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={'role': 'standby_leader'})): MockRestApiServer(RestApiHandler, 'GET /standby_leader?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /standby_leader?lag=1M&' 'tag_key1=true&tag_key2=False&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET 
/standby_leader?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1.0&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /standby_leader?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag&tag_key6=RandomTag2') # MockRestApiServer(RestApiHandler, 'GET /replica?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /replica?lag=1M&' 'tag_key1=true&tag_key2=False&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /replica?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1.0&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /replica?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag&tag_key6=RandomTag2') # with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={'role': 'primary'})): MockRestApiServer(RestApiHandler, 'GET /replica?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /replica?lag=1M&' 'tag_key1=true&tag_key2=False&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /replica?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1.0&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /replica?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag&tag_key6=RandomTag2') # MockRestApiServer(RestApiHandler, 'GET /read-write?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /read-write?lag=1M&' 'tag_key1=true&tag_key2=False&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /read-write?lag=1M&' 'tag_key1=true&tag_key2=false&' 'tag_key3=1.0&tag_key4=1.4&tag_key5=RandomTag') MockRestApiServer(RestApiHandler, 'GET /read-write?lag=1M&' 
'tag_key1=true&tag_key2=false&' 'tag_key3=1&tag_key4=1.4&tag_key5=RandomTag&tag_key6=RandomTag2') def test_do_OPTIONS(self): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'OPTIONS / HTTP/1.0')) def test_do_HEAD(self): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'HEAD / HTTP/1.0')) @patch.object(MockPatroni, 'dcs') def test_do_GET_liveness(self, mock_dcs): mock_dcs.ttl.return_value = PropertyMock(30) self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /liveness HTTP/1.0')) def test_do_GET_readiness(self): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /readiness HTTP/1.0')) with patch.object(MockHa, 'is_leader', Mock(return_value=True)): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /readiness HTTP/1.0')) with patch.object(MockPostgresql, 'state', PropertyMock(return_value='stopped')): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /readiness HTTP/1.0')) @patch.object(MockPostgresql, 'state', PropertyMock(return_value='stopped')) def test_do_GET_patroni(self): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /patroni')) def test_basicauth(self): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'POST /restart HTTP/1.0')) MockRestApiServer(RestApiHandler, 'POST /restart HTTP/1.0\nAuthorization:') @patch.object(MockPatroni, 'dcs') def test_do_GET_cluster(self, mock_dcs): mock_dcs.get_cluster.return_value = get_cluster_initialized_without_leader() mock_dcs.get_cluster.return_value.members[1].data['xlog_location'] = 11 self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /cluster')) @patch.object(MockPatroni, 'dcs') def test_do_GET_history(self, mock_dcs): mock_dcs.cluster = get_cluster_initialized_without_leader() self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /history')) @patch.object(MockPatroni, 'dcs') def test_do_GET_config(self, mock_dcs): mock_dcs.cluster.config.data = {} self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /config')) 
mock_dcs.cluster.config = None self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /config')) @patch.object(MockPatroni, 'dcs') def test_do_GET_metrics(self, mock_dcs): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /metrics')) @patch.object(MockPatroni, 'dcs') def test_do_PATCH_config(self, mock_dcs): config = {'postgresql': {'use_slots': False, 'use_pg_rewind': True, 'parameters': {'wal_level': 'logical'}}} mock_dcs.get_cluster.return_value.config = ClusterConfig.from_node(1, json.dumps(config)) request = 'PATCH /config HTTP/1.0' + self._authorization self.assertIsNotNone(MockRestApiServer(RestApiHandler, request)) request += '\nContent-Length: ' self.assertIsNotNone(MockRestApiServer(RestApiHandler, request + '34\n\n{"postgresql":{"use_slots":false}}')) config['ttl'] = 5 config['postgresql'].update({'use_slots': {'foo': True}, "parameters": None}) config = json.dumps(config) request += str(len(config)) + '\n\n' + config MockRestApiServer(RestApiHandler, request) mock_dcs.set_config_value.return_value = False MockRestApiServer(RestApiHandler, request) mock_dcs.get_cluster.return_value.config = None MockRestApiServer(RestApiHandler, request) @patch.object(MockPatroni, 'dcs') def test_do_PUT_config(self, mock_dcs): mock_dcs.get_cluster.return_value.config = ClusterConfig.from_node(1, '{}') request = 'PUT /config HTTP/1.0' + self._authorization + '\nContent-Length: ' self.assertIsNotNone(MockRestApiServer(RestApiHandler, request + '2\n\n{}')) config = '{"foo": "bar"}' request += str(len(config)) + '\n\n' + config MockRestApiServer(RestApiHandler, request) mock_dcs.set_config_value.return_value = False MockRestApiServer(RestApiHandler, request) mock_dcs.get_cluster.return_value.config = ClusterConfig.from_node(1, config) MockRestApiServer(RestApiHandler, request) @patch.object(MockPatroni, 'dcs') def test_do_GET_failsafe(self, mock_dcs): type(mock_dcs).failsafe = PropertyMock(return_value={'node1': 'http://foo:8080/patroni'}) 
self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /failsafe')) type(mock_dcs).failsafe = PropertyMock(return_value=None) self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /failsafe')) def test_do_POST_failsafe(self): with patch.object(MockHa, 'is_failsafe_mode', Mock(return_value=False), create=True): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'POST /failsafe HTTP/1.0' + self._authorization)) with patch.object(MockHa, 'is_failsafe_mode', Mock(return_value=True), create=True): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'POST /failsafe HTTP/1.0' + self._authorization + '\nContent-Length: 9\n\n{"a":"b"}')) @patch.object(MockPatroni, 'sighup_handler', Mock()) def test_do_POST_reload(self): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'POST /reload HTTP/1.0' + self._authorization)) @patch('os.environ', {'BEHAVE_DEBUG': 'true'}) @patch('os.name', 'nt') def test_do_POST_sigterm(self): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'POST /sigterm HTTP/1.0' + self._authorization)) def test_do_POST_restart(self): request = 'POST /restart HTTP/1.0' + self._authorization self.assertIsNotNone(MockRestApiServer(RestApiHandler, request)) with patch.object(MockHa, 'restart', Mock(side_effect=Exception)): MockRestApiServer(RestApiHandler, request) post = request + '\nContent-Length: ' def make_request(request=None, **kwargs): request = json.dumps(kwargs) if request is None else request return '{0}{1}\n\n{2}'.format(post, len(request), request) # empty request request = make_request('') MockRestApiServer(RestApiHandler, request) # invalid request request = make_request('foobar=baz') MockRestApiServer(RestApiHandler, request) # wrong role request = make_request(schedule=future_restart_time.isoformat(), role='unknown', postgres_version='9.5.3') MockRestApiServer(RestApiHandler, request) # wrong version request = make_request(schedule=future_restart_time.isoformat(), role='primary', postgres_version='9.5.3.1') 
MockRestApiServer(RestApiHandler, request) # unknown filter request = make_request(schedule=future_restart_time.isoformat(), batman='lives') MockRestApiServer(RestApiHandler, request) # incorrect schedule request = make_request(schedule='2016-08-42 12:45TZ+1', role='primary') MockRestApiServer(RestApiHandler, request) # everything fine, but the schedule is missing request = make_request(role='primary', postgres_version='9.5.2') MockRestApiServer(RestApiHandler, request) for retval in (True, False): with patch.object(MockHa, 'schedule_future_restart', Mock(return_value=retval)): request = make_request(schedule=future_restart_time.isoformat()) MockRestApiServer(RestApiHandler, request) with patch.object(MockHa, 'restart', Mock(return_value=(retval, "foo"))): request = make_request(role='primary', postgres_version='9.5.2') MockRestApiServer(RestApiHandler, request) with patch.object(GlobalConfig, 'is_paused', PropertyMock(return_value=True)): MockRestApiServer(RestApiHandler, make_request(schedule='2016-08-42 12:45TZ+1', role='primary')) # Valid timeout MockRestApiServer(RestApiHandler, make_request(timeout='60s')) # Invalid timeout MockRestApiServer(RestApiHandler, make_request(timeout='42towels')) def test_do_DELETE_restart(self): for retval in (True, False): with patch.object(MockHa, 'delete_future_restart', Mock(return_value=retval)): request = 'DELETE /restart HTTP/1.0' + self._authorization self.assertIsNotNone(MockRestApiServer(RestApiHandler, request)) @patch.object(MockPatroni, 'dcs') def test_do_DELETE_switchover(self, mock_dcs): request = 'DELETE /switchover HTTP/1.0' + self._authorization self.assertIsNotNone(MockRestApiServer(RestApiHandler, request)) mock_dcs.manual_failover.return_value = False self.assertIsNotNone(MockRestApiServer(RestApiHandler, request)) mock_dcs.get_cluster.return_value.failover = None self.assertIsNotNone(MockRestApiServer(RestApiHandler, request)) def test_do_POST_reinitialize(self): request = 'POST /reinitialize HTTP/1.0' + 
self._authorization + '\nContent-Length: 15\n\n{"force": true}' MockRestApiServer(RestApiHandler, request) with patch.object(MockHa, 'reinitialize', Mock(return_value=None)): MockRestApiServer(RestApiHandler, request) @patch('time.sleep', Mock()) def test_RestApiServer_query(self): with patch.object(MockConnection, 'query', Mock(side_effect=RetryFailedError('bla'))): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /patroni')) @patch('time.sleep', Mock()) @patch.object(MockPatroni, 'dcs') def test_do_POST_switchover(self, dcs): dcs.loop_wait = 10 cluster = dcs.get_cluster.return_value post = 'POST /switchover HTTP/1.0' + self._authorization + '\nContent-Length: ' # Invalid content with patch.object(RestApiHandler, 'write_response') as response_mock: MockRestApiServer(RestApiHandler, post + '7\n\n{"1":2}') response_mock.assert_called_with(400, 'Switchover could be performed only from a specific leader') # Empty content request = post + '0\n\n' MockRestApiServer(RestApiHandler, request) # [Switchover without a candidate] # Cluster with only a leader with patch.object(RestApiHandler, 'write_response') as response_mock: cluster.leader.name = 'postgresql1' request = post + '25\n\n{"leader": "postgresql1"}' MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with( 412, 'switchover is not possible: cluster does not have members except leader') # Switchover in pause mode with patch.object(RestApiHandler, 'write_response') as response_mock, \ patch.object(GlobalConfig, 'is_paused', PropertyMock(return_value=True)): MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with( 400, 'Switchover is possible only to a specific candidate in a paused state') # No healthy nodes to promote in both sync and async mode for is_synchronous_mode, response in ( (True, 'switchover is not possible: can not find sync_standby'), (False, 'switchover is not possible: cluster does not have members except leader')): with patch.object(GlobalConfig, 
'is_synchronous_mode', PropertyMock(return_value=is_synchronous_mode)), \ patch.object(RestApiHandler, 'write_response') as response_mock: MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with(412, response) # [Switchover to the candidate specified] # Candidate to promote is the same as the leader specified with patch.object(RestApiHandler, 'write_response') as response_mock: request = post + '53\n\n{"leader": "postgresql2", "candidate": "postgresql2"}' MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with(400, 'Switchover target and source are the same') # Current leader is different from the one specified with patch.object(RestApiHandler, 'write_response') as response_mock: cluster.leader.name = 'postgresql2' request = post + '53\n\n{"leader": "postgresql1", "candidate": "postgresql2"}' MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with(412, 'leader name does not match') # Candidate to promote is not a member of the cluster cluster.leader.name = 'postgresql1' cluster.sync.matches.return_value = False for is_synchronous_mode, response in ( (True, 'candidate name does not match with sync_standby'), (False, 'candidate does not exists')): with patch.object(GlobalConfig, 'is_synchronous_mode', PropertyMock(return_value=is_synchronous_mode)), \ patch.object(RestApiHandler, 'write_response') as response_mock: MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with(412, response) cluster.members = [Member(0, 'postgresql0', 30, {'api_url': 'http'}), Member(0, 'postgresql2', 30, {'api_url': 'http'})] # Failover key is empty in DCS with patch.object(RestApiHandler, 'write_response') as response_mock: cluster.failover = None MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with(503, 'Switchover failed') # Result polling failed with patch.object(RestApiHandler, 'write_response') as response_mock: dcs.get_cluster.side_effect = [cluster] 
MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with(503, 'Switchover status unknown') # Switchover to a node different from the candidate specified with patch.object(RestApiHandler, 'write_response') as response_mock: cluster2 = cluster.copy() cluster2.leader.name = 'postgresql0' cluster2.is_unlocked.return_value = False dcs.get_cluster.side_effect = [cluster, cluster2] MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with(200, 'Switched over to "postgresql0" instead of "postgresql2"') # Successful switchover to the candidate with patch.object(RestApiHandler, 'write_response') as response_mock: cluster2.leader.name = 'postgresql2' dcs.get_cluster.side_effect = [cluster, cluster2] MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with(200, 'Successfully switched over to "postgresql2"') with patch.object(RestApiHandler, 'write_response') as response_mock: dcs.manual_failover.return_value = False dcs.get_cluster.side_effect = None MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with(503, 'failed to write failover key into DCS') dcs.manual_failover.return_value = True # Candidate is not healthy to be promoted with patch.object(MockHa, 'fetch_nodes_statuses', Mock(return_value=[])), \ patch.object(RestApiHandler, 'write_response') as response_mock: MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with(412, 'switchover is not possible: no good candidates have been found') # [Scheduled switchover] # Valid future date with patch.object(RestApiHandler, 'write_response') as response_mock: request = post + '103\n\n{"leader": "postgresql1", "member": "postgresql2",' + \ ' "scheduled_at": "6016-02-15T18:13:30.568224+01:00"}' MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with(202, 'Switchover scheduled') # Schedule in paused mode with patch.object(RestApiHandler, 'write_response') as response_mock, \ patch.object(GlobalConfig, 'is_paused', 
PropertyMock(return_value=True)): dcs.manual_failover.return_value = False MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with(400, "Can't schedule switchover in the paused state") # No timezone specified with patch.object(RestApiHandler, 'write_response') as response_mock: request = post + '97\n\n{"leader": "postgresql1", "member": "postgresql2",' + \ ' "scheduled_at": "6016-02-15T18:13:30.568224"}' MockRestApiServer(RestApiHandler, request) response_mock.assert_called_with(400, 'Timezone information is mandatory for the scheduled switchover') request = post + '103\n\n{"leader": "postgresql1", "member": "postgresql2", "scheduled_at": "' # Scheduled in the past with patch.object(RestApiHandler, 'write_response') as response_mock: MockRestApiServer(RestApiHandler, request + '1016-02-15T18:13:30.568224+01:00"}') response_mock.assert_called_with(422, 'Cannot schedule switchover in the past') # Invalid date with patch.object(RestApiHandler, 'write_response') as response_mock: MockRestApiServer(RestApiHandler, request + '2010-02-29T18:13:30.568224+01:00"}') response_mock.assert_called_with( 422, 'Unable to parse scheduled timestamp. It should be in an unambiguous format, e.g. 
ISO 8601') def test_do_POST_failover(self): post = 'POST /failover HTTP/1.0' + self._authorization + '\nContent-Length: ' with patch.object(RestApiHandler, 'write_response') as response_mock: MockRestApiServer(RestApiHandler, post + '14\n\n{"leader":"1"}') response_mock.assert_called_once_with(400, 'Failover could be performed only to a specific candidate') with patch.object(RestApiHandler, 'write_response') as response_mock: MockRestApiServer(RestApiHandler, post + '37\n\n{"candidate":"2","scheduled_at": "1"}') response_mock.assert_called_once_with(400, "Failover can't be scheduled") with patch.object(RestApiHandler, 'write_response') as response_mock: MockRestApiServer(RestApiHandler, post + '30\n\n{"leader":"1","candidate":"2"}') response_mock.assert_called_once_with(412, 'leader name does not match') @patch.object(MockHa, 'is_leader', Mock(return_value=True)) def test_do_POST_citus(self): post = 'POST /citus HTTP/1.0' + self._authorization + '\nContent-Length: ' MockRestApiServer(RestApiHandler, post + '0\n\n') MockRestApiServer(RestApiHandler, post + '14\n\n{"leader":"1"}') class TestRestApiServer(unittest.TestCase): @patch('ssl.SSLContext.load_cert_chain', Mock()) @patch('ssl.SSLContext.set_ciphers', Mock()) @patch('ssl.SSLContext.wrap_socket', Mock(return_value=0)) @patch.object(HTTPServer, '__init__', Mock()) def setUp(self): self.srv = MockRestApiServer(Mock(), '', {'listen': '*:8008', 'certfile': 'a', 'verify_client': 'required', 'ciphers': '!SSLv1:!SSLv2:!SSLv3:!TLSv1:!TLSv1.1', 'allowlist': ['127.0.0.1', '::1/128', '::1/zxc'], 'allowlist_include_members': True}) @patch.object(HTTPServer, '__init__', Mock()) def test_reload_config(self): bad_config = {'listen': 'foo'} self.assertRaises(ValueError, MockRestApiServer, None, '', bad_config) self.assertRaises(ValueError, self.srv.reload_config, bad_config) self.assertRaises(ValueError, self.srv.reload_config, {}) with patch.object(socket.socket, 'setsockopt', Mock(side_effect=socket.error)), \ 
patch.object(MockRestApiServer, 'server_close', Mock()): self.srv.reload_config({'listen': ':8008'}) @patch.object(MockPatroni, 'dcs') def test_check_access(self, mock_dcs): mock_dcs.cluster = get_cluster_initialized_without_leader() mock_dcs.cluster.members[1].data['api_url'] = 'http://127.0.0.1z:8011/patroni' mock_dcs.cluster.members.append(Member(0, 'bad-api-url', 30, {'api_url': 123})) mock_rh = Mock() mock_rh.client_address = ('127.0.0.2',) self.assertIsNot(self.srv.check_access(mock_rh), True) mock_rh.client_address = ('127.0.0.1',) mock_rh.request.getpeercert.return_value = None self.assertIsNot(self.srv.check_access(mock_rh), True) def test_handle_error(self): try: raise Exception() except Exception: self.assertIsNone(self.srv.handle_error(None, ('127.0.0.1', 55555))) @patch.object(HTTPServer, '__init__', Mock(side_effect=socket.error)) def test_socket_error(self): self.assertRaises(socket.error, MockRestApiServer, Mock(), '', {'listen': '*:8008'}) def __create_socket(self): sock = socket.socket() try: import ssl ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) ctx.check_hostname = False sock = ctx.wrap_socket(sock=sock) sock.do_handshake = Mock() sock.unwrap = Mock(side_effect=Exception) except Exception: pass return sock @patch.object(ThreadingMixIn, 'process_request_thread', Mock()) def test_process_request_thread(self): self.srv.process_request_thread(self.__create_socket(), ('2', 54321)) @patch.object(MockRestApiServer, 'process_request', Mock(side_effect=RuntimeError)) @patch.object(MockRestApiServer, 'get_request') def test_process_request_error(self, mock_get_request): mock_get_request.return_value = (self.__create_socket(), ('127.0.0.1', 55555)) self.srv._handle_request_noblock() @patch('ssl._ssl._test_decode_cert', Mock()) def test_reload_local_certificate(self): self.assertTrue(self.srv.reload_local_certificate()) def test_get_certificate_serial_number(self): self.assertIsNone(self.srv.get_certificate_serial_number()) def 
test_query(self): with patch.object(MockConnection, 'get', Mock(side_effect=OperationalError)): self.assertRaises(PostgresConnectionException, self.srv.query, 'SELECT 1') with patch.object(MockConnection, 'get', Mock(side_effect=[MockConnect(), OperationalError])), \ patch.object(MockConnection, 'query') as mock_query: self.srv.query('SELECT 1') mock_query.assert_called_once_with('SELECT 1') patroni-3.2.2/tests/test_async_executor.py000066400000000000000000000013641455170150700210110ustar00rootroot00000000000000import unittest from mock import Mock, patch from patroni.async_executor import AsyncExecutor, CriticalTask from threading import Thread class TestAsyncExecutor(unittest.TestCase): def setUp(self): self.a = AsyncExecutor(Mock(), Mock()) @patch.object(Thread, 'start', Mock()) def test_run_async(self): self.a.run_async(Mock(return_value=True)) def test_run(self): self.a.run(Mock(side_effect=Exception())) def test_cancel(self): self.a.cancel() self.a.schedule('foo') self.a.cancel() self.a.run(Mock()) class TestCriticalTask(unittest.TestCase): def test_completed_task(self): ct = CriticalTask() ct.complete(1) self.assertFalse(ct.cancel()) patroni-3.2.2/tests/test_aws.py000066400000000000000000000045651455170150700165560ustar00rootroot00000000000000import botocore import botocore.awsrequest import sys import unittest from mock import Mock, PropertyMock, patch from collections import namedtuple from patroni.scripts.aws import AWSConnection, main as _main class MockVolumes(object): @staticmethod def filter(*args, **kwargs): oid = namedtuple('Volume', 'id') return [oid(id='a'), oid(id='b')] class MockEc2Connection(object): volumes = MockVolumes() @staticmethod def create_tags(Resources, **kwargs): if len(Resources) == 0: raise botocore.exceptions.ClientError({'Error': {'Code': 503, 'Message': 'Request limit exceeded'}}, 'create_tags') return True class MockIMDSFetcher(object): def __init__(self, timeout): pass @staticmethod def _fetch_metadata_token(): return '' 
@staticmethod def _get_request(*args): return botocore.awsrequest.AWSResponse(url='', status_code=200, headers={}, raw=None) @patch('boto3.resource', Mock(return_value=MockEc2Connection())) @patch('patroni.scripts.aws.IMDSFetcher', MockIMDSFetcher) class TestAWSConnection(unittest.TestCase): @patch.object(botocore.awsrequest.AWSResponse, 'text', PropertyMock(return_value='{"instanceId": "012345", "region": "eu-west-1"}')) def test_on_role_change(self): conn = AWSConnection('test') self.assertTrue(conn.on_role_change('primary')) with patch.object(MockVolumes, 'filter', Mock(return_value=[])): conn._retry.max_tries = 1 self.assertFalse(conn.on_role_change('primary')) @patch.object(MockIMDSFetcher, '_get_request', Mock(side_effect=Exception('foo'))) def test_non_aws(self): conn = AWSConnection('test') self.assertFalse(conn.on_role_change("primary")) @patch.object(botocore.awsrequest.AWSResponse, 'text', PropertyMock(return_value='boo')) def test_aws_bizare_response(self): conn = AWSConnection('test') self.assertFalse(conn.aws_available()) @patch.object(MockIMDSFetcher, '_get_request', Mock(return_value=botocore.awsrequest.AWSResponse( url='', status_code=503, headers={}, raw=None))) @patch('sys.exit', Mock()) def test_main(self): self.assertIsNone(_main()) sys.argv = ['aws.py', 'on_start', 'replica', 'foo'] self.assertIsNone(_main()) patroni-3.2.2/tests/test_bootstrap.py000066400000000000000000000331561455170150700177770ustar00rootroot00000000000000import os import sys from mock import Mock, PropertyMock, patch from patroni.async_executor import CriticalTask from patroni.postgresql import Postgresql from patroni.postgresql.bootstrap import Bootstrap from patroni.postgresql.cancellable import CancellableSubprocess from patroni.postgresql.config import ConfigHandler from . 
import psycopg_connect, BaseTestPostgresql, mock_available_gucs @patch('subprocess.call', Mock(return_value=0)) @patch('patroni.psycopg.connect', psycopg_connect) @patch('os.rename', Mock()) @patch.object(Postgresql, 'available_gucs', mock_available_gucs) class TestBootstrap(BaseTestPostgresql): @patch('patroni.postgresql.CallbackExecutor', Mock()) def setUp(self): super(TestBootstrap, self).setUp() self.b = self.p.bootstrap @patch('time.sleep', Mock()) @patch.object(CancellableSubprocess, 'call') @patch.object(Postgresql, 'remove_data_directory', Mock(return_value=True)) @patch.object(Postgresql, 'data_directory_empty', Mock(return_value=False)) @patch.object(Bootstrap, '_post_restore', Mock(side_effect=OSError)) def test_create_replica(self, mock_cancellable_subprocess_call): self.p.config._config['create_replica_methods'] = ['pgBackRest'] self.p.config._config['pgBackRest'] = {'command': 'pgBackRest', 'keep_data': True, 'no_params': True} mock_cancellable_subprocess_call.return_value = 0 self.assertEqual(self.b.create_replica(self.leader), 0) self.p.config._config['create_replica_methods'] = ['basebackup'] self.p.config._config['basebackup'] = [{'max_rate': '100M'}, 'no-sync'] self.assertEqual(self.b.create_replica(self.leader), 0) self.p.config._config['basebackup'] = [{'max_rate': '100M', 'compress': '9'}] with patch('patroni.postgresql.bootstrap.logger.error', new_callable=Mock()) as mock_logger: self.b.create_replica(self.leader) mock_logger.assert_called_once() self.assertTrue("only one key-value is allowed and value should be a string" in mock_logger.call_args[0][0], "not matching {0}".format(mock_logger.call_args[0][0])) self.p.config._config['basebackup'] = [42] with patch('patroni.postgresql.bootstrap.logger.error', new_callable=Mock()) as mock_logger: self.b.create_replica(self.leader) mock_logger.assert_called_once() self.assertTrue("value should be string value or a single key-value pair" in mock_logger.call_args[0][0], "not matching 
{0}".format(mock_logger.call_args[0][0])) self.p.config._config['basebackup'] = {"foo": "bar"} self.assertEqual(self.b.create_replica(self.leader), 0) self.p.config._config['create_replica_methods'] = ['wale', 'basebackup'] del self.p.config._config['basebackup'] mock_cancellable_subprocess_call.return_value = 1 self.assertEqual(self.b.create_replica(self.leader), 1) mock_cancellable_subprocess_call.side_effect = Exception('foo') self.assertEqual(self.b.create_replica(self.leader), 1) mock_cancellable_subprocess_call.side_effect = [1, 0] self.assertEqual(self.b.create_replica(self.leader), 0) mock_cancellable_subprocess_call.side_effect = [Exception(), 0] self.assertEqual(self.b.create_replica(self.leader), 0) self.p.cancellable.cancel() self.assertEqual(self.b.create_replica(self.leader), 1) @patch('time.sleep', Mock()) @patch.object(CancellableSubprocess, 'call') @patch.object(Postgresql, 'remove_data_directory', Mock(return_value=True)) @patch.object(Bootstrap, '_post_restore', Mock(side_effect=OSError)) def test_create_replica_old_format(self, mock_cancellable_subprocess_call): """ The same test as before but with old 'create_replica_method' to test backward compatibility """ self.p.config._config['create_replica_method'] = ['wale', 'basebackup'] self.p.config._config['wale'] = {'command': 'foo'} mock_cancellable_subprocess_call.return_value = 0 self.assertEqual(self.b.create_replica(self.leader), 0) del self.p.config._config['wale'] self.assertEqual(self.b.create_replica(self.leader), 0) self.p.config._config['create_replica_method'] = ['wale'] mock_cancellable_subprocess_call.return_value = 1 self.assertEqual(self.b.create_replica(self.leader), 1) def test_basebackup(self): self.p.cancellable.cancel() self.b.basebackup(None, None, {'foo': 'bar'}) def test__initdb(self): self.assertRaises(Exception, self.b.bootstrap, {'initdb': [{'pgdata': 'bar'}]}) self.assertRaises(Exception, self.b.bootstrap, {'initdb': [{'foo': 'bar', 1: 2}]}) self.assertRaises(Exception, 
self.b.bootstrap, {'initdb': [1]}) self.assertRaises(Exception, self.b.bootstrap, {'initdb': 1}) def test__process_user_options(self): def error_handler(msg): raise Exception(msg) self.assertEqual(self.b.process_user_options('initdb', ['string'], (), error_handler), ['--string']) self.assertEqual( self.b.process_user_options( 'initdb', [{'key': 'value'}], (), error_handler ), ['--key=value']) if sys.platform != 'win32': self.assertEqual( self.b.process_user_options( 'initdb', [{'key': 'value with spaces'}], (), error_handler ), ["--key=value with spaces"]) self.assertEqual( self.b.process_user_options( 'initdb', [{'key': "'value with spaces'"}], (), error_handler ), ["--key=value with spaces"]) self.assertEqual( self.b.process_user_options( 'initdb', {'key': 'value with spaces'}, (), error_handler ), ["--key=value with spaces"]) self.assertEqual( self.b.process_user_options( 'initdb', {'key': "'value with spaces'"}, (), error_handler ), ["--key=value with spaces"]) @patch.object(CancellableSubprocess, 'call', Mock()) @patch.object(Postgresql, 'is_running', Mock(return_value=True)) @patch.object(Postgresql, 'data_directory_empty', Mock(return_value=False)) @patch.object(Postgresql, 'controldata', Mock(return_value={'max_connections setting': 100, 'max_prepared_xacts setting': 0, 'max_locks_per_xact setting': 64})) def test_bootstrap(self): with patch('subprocess.call', Mock(return_value=1)): self.assertFalse(self.b.bootstrap({})) config = {'users': {'replicator': {'password': 'rep-pass', 'options': ['replication']}}} with patch.object(Postgresql, 'is_running', Mock(return_value=False)), \ patch.object(Postgresql, 'get_major_version', Mock(return_value=140000)), \ patch('multiprocessing.Process', Mock(side_effect=Exception)), \ patch('multiprocessing.get_context', Mock(side_effect=Exception), create=True): self.assertRaises(Exception, self.b.bootstrap, config) with open(os.path.join(self.p.data_dir, 'pg_hba.conf')) as f: lines = f.readlines() self.assertTrue('host 
all all 0.0.0.0/0 md5\n' in lines) self.p.config._config.pop('pg_hba') config.update({'post_init': '/bin/false', 'pg_hba': ['host replication replicator 127.0.0.1/32 md5', 'hostssl all all 0.0.0.0/0 md5', 'host all all 0.0.0.0/0 md5']}) self.b.bootstrap(config) with open(os.path.join(self.p.data_dir, 'pg_hba.conf')) as f: lines = f.readlines() self.assertTrue('host replication replicator 127.0.0.1/32 md5\n' in lines) @patch.object(CancellableSubprocess, 'call') @patch.object(Postgresql, 'get_major_version', Mock(return_value=90600)) @patch.object(Postgresql, 'controldata', Mock(return_value={'Database cluster state': 'in production'})) def test_custom_bootstrap(self, mock_cancellable_subprocess_call): self.p.config._config.pop('pg_hba') config = {'method': 'foo', 'foo': {'command': 'bar --arg1=val1'}} mock_cancellable_subprocess_call.return_value = 1 self.assertFalse(self.b.bootstrap(config)) self.assertEqual(mock_cancellable_subprocess_call.call_args_list[0][0][0], ['bar', '--arg1=val1', '--scope=batman', '--datadir=' + os.path.join('data', 'test0')]) mock_cancellable_subprocess_call.reset_mock() config['foo']['no_params'] = 1 self.assertFalse(self.b.bootstrap(config)) self.assertEqual(mock_cancellable_subprocess_call.call_args_list[0][0][0], ['bar', '--arg1=val1']) mock_cancellable_subprocess_call.return_value = 0 with patch('multiprocessing.Process', Mock(side_effect=Exception("42"))), \ patch('multiprocessing.get_context', Mock(side_effect=Exception("42")), create=True), \ patch('os.path.isfile', Mock(return_value=True)), \ patch('os.unlink', Mock()), \ patch.object(ConfigHandler, 'save_configuration_files', Mock()), \ patch.object(ConfigHandler, 'restore_configuration_files', Mock()), \ patch.object(ConfigHandler, 'write_recovery_conf', Mock()): with self.assertRaises(Exception) as e: self.b.bootstrap(config) self.assertEqual(str(e.exception), '42') config['foo']['recovery_conf'] = {'foo': 'bar'} with self.assertRaises(Exception) as e: self.b.bootstrap(config) 
self.assertEqual(str(e.exception), '42') mock_cancellable_subprocess_call.side_effect = Exception self.assertFalse(self.b.bootstrap(config)) @patch('time.sleep', Mock()) @patch('os.unlink', Mock()) @patch('shutil.copy', Mock()) @patch('os.path.isfile', Mock(return_value=True)) @patch('patroni.postgresql.bootstrap.quote_ident', Mock()) @patch.object(Bootstrap, 'call_post_bootstrap', Mock(return_value=True)) @patch.object(Bootstrap, '_custom_bootstrap', Mock(return_value=True)) @patch.object(Postgresql, 'start', Mock(return_value=True)) @patch.object(Postgresql, 'get_major_version', Mock(return_value=110000)) def test_post_bootstrap(self): config = {'method': 'foo', 'foo': {'command': 'bar'}} self.b.bootstrap(config) task = CriticalTask() with patch.object(Bootstrap, 'create_or_update_role', Mock(side_effect=Exception)): self.b.post_bootstrap({}, task) self.assertFalse(task.result) self.p.config._config.pop('pg_hba') self.b.post_bootstrap({}, task) self.assertTrue(task.result) self.b.bootstrap(config) with patch.object(Postgresql, 'pending_restart', PropertyMock(return_value=True)), \ patch.object(Postgresql, 'restart', Mock()) as mock_restart: self.b.post_bootstrap({}, task) mock_restart.assert_called_once() self.b.bootstrap(config) self.p.set_state('stopped') self.p.reload_config({'authentication': {'superuser': {'username': 'p', 'password': 'p'}, 'replication': {'username': 'r', 'password': 'r'}, 'rewind': {'username': 'rw', 'password': 'rw'}}, 'listen': '*', 'retry_timeout': 10, 'parameters': {'wal_level': '', 'hba_file': 'foo', 'max_prepared_transactions': 10}}) with patch.object(Postgresql, 'major_version', PropertyMock(return_value=110000)), \ patch.object(Postgresql, 'restart', Mock()) as mock_restart: self.b.post_bootstrap({}, task) mock_restart.assert_called_once() @patch.object(CancellableSubprocess, 'call') def test_call_post_bootstrap(self, mock_cancellable_subprocess_call): mock_cancellable_subprocess_call.return_value = 1 
self.assertFalse(self.b.call_post_bootstrap({'post_init': '/bin/false'})) mock_cancellable_subprocess_call.return_value = 0 self.p.connection_pool._conn_kwargs.pop('user') self.assertTrue(self.b.call_post_bootstrap({'post_init': '/bin/false'})) mock_cancellable_subprocess_call.assert_called() args, kwargs = mock_cancellable_subprocess_call.call_args self.assertTrue('PGPASSFILE' in kwargs['env']) self.assertEqual(args[0], ['/bin/false', 'dbname=postgres host=/tmp port=5432']) mock_cancellable_subprocess_call.reset_mock() self.p.connection_pool._conn_kwargs.pop('host') self.assertTrue(self.b.call_post_bootstrap({'post_init': '/bin/false'})) mock_cancellable_subprocess_call.assert_called() self.assertEqual(mock_cancellable_subprocess_call.call_args[0][0], ['/bin/false', 'dbname=postgres port=5432']) mock_cancellable_subprocess_call.side_effect = OSError self.assertFalse(self.b.call_post_bootstrap({'post_init': '/bin/false'})) @patch('os.path.exists', Mock(return_value=True)) @patch('os.unlink', Mock()) @patch.object(Bootstrap, 'create_replica', Mock(return_value=0)) def test_clone(self): self.b.clone(self.leader) patroni-3.2.2/tests/test_callback_executor.py000066400000000000000000000025501455170150700214260ustar00rootroot00000000000000import psutil import unittest from mock import Mock, patch from patroni.postgresql.callback_executor import CallbackExecutor class TestCallbackExecutor(unittest.TestCase): @patch('psutil.Popen') def test_callback_executor(self, mock_popen): mock_popen.return_value.children.return_value = [] mock_popen.return_value.is_running.return_value = True callback = ['test.sh', 'on_start', 'replica', 'foo'] ce = CallbackExecutor() ce._kill_children = Mock(side_effect=Exception) ce._invoke_excepthook = Mock() self.assertIsNone(ce.call(callback)) ce.join() self.assertIsNone(ce.call(callback)) mock_popen.return_value.kill.side_effect = psutil.AccessDenied() self.assertIsNone(ce.call(callback)) ce._process_children = [] 
mock_popen.return_value.children.side_effect = psutil.Error() mock_popen.return_value.kill.side_effect = psutil.NoSuchProcess(123) self.assertIsNone(ce.call(callback)) mock_popen.side_effect = Exception ce = CallbackExecutor() ce._condition.wait = Mock(side_effect=[None, Exception]) ce._invoke_excepthook = Mock() self.assertIsNone(ce.call(callback)) mock_popen.side_effect = [Mock()] self.assertIsNone(ce.call(['test.sh', 'on_reload', 'replica', 'foo'])) ce.join() patroni-3.2.2/tests/test_cancellable.py000066400000000000000000000022421455170150700201770ustar00rootroot00000000000000import psutil import unittest from mock import Mock, patch from patroni.exceptions import PostgresException from patroni.postgresql.cancellable import CancellableSubprocess class TestCancellableSubprocess(unittest.TestCase): def setUp(self): self.c = CancellableSubprocess() def test_call(self): self.c.cancel() self.assertRaises(PostgresException, self.c.call) def test__kill_children(self): self.c._process_children = [Mock()] self.c._kill_children() self.c._process_children[0].kill.side_effect = psutil.AccessDenied() self.c._kill_children() self.c._process_children[0].kill.side_effect = psutil.NoSuchProcess(123) self.c._kill_children() @patch('patroni.postgresql.cancellable.polling_loop', Mock(return_value=[0, 0])) def test_cancel(self): self.c._process = Mock() self.c._process.is_running.return_value = True self.c._process.children.side_effect = psutil.NoSuchProcess(123) self.c._process.suspend.side_effect = psutil.AccessDenied() self.c.cancel() self.c._process.is_running.side_effect = [True, False] self.c.cancel() patroni-3.2.2/tests/test_citus.py000066400000000000000000000236141455170150700171070ustar00rootroot00000000000000import time from mock import Mock, patch, PropertyMock from patroni.postgresql.citus import CitusHandler from patroni.psycopg import ProgrammingError from . 
import BaseTestPostgresql, MockCursor, psycopg_connect, SleepException from .test_ha import get_cluster_initialized_with_leader @patch('patroni.postgresql.citus.Thread', Mock()) @patch('patroni.psycopg.connect', psycopg_connect) class TestCitus(BaseTestPostgresql): def setUp(self): super(TestCitus, self).setUp() self.c = self.p.citus_handler self.cluster = get_cluster_initialized_with_leader() self.cluster.workers[1] = self.cluster @patch('time.time', Mock(side_effect=[100, 130, 160, 190, 220, 250, 280, 310, 340, 370])) @patch('patroni.postgresql.citus.logger.exception', Mock(side_effect=SleepException)) @patch('patroni.postgresql.citus.logger.warning') @patch('patroni.postgresql.citus.PgDistNode.wait', Mock()) @patch.object(CitusHandler, 'is_alive', Mock(return_value=True)) def test_run(self, mock_logger_warning): # `before_demote` or `before_promote` REST API calls starting a # transaction. We want to make sure that it finishes during # certain timeout. In case if it is not, we want to roll it back # in order to not block other workers that want to update # `pg_dist_node`. 
self.c._condition.wait = Mock(side_effect=[Mock(), Mock(), Mock(), SleepException]) self.c.handle_event(self.cluster, {'type': 'before_demote', 'group': 1, 'leader': 'leader', 'timeout': 30, 'cooldown': 10}) self.c.add_task('after_promote', 2, 'postgres://host3:5432/postgres') self.assertRaises(SleepException, self.c.run) mock_logger_warning.assert_called_once() self.assertTrue(mock_logger_warning.call_args[0][0].startswith('Rolling back transaction')) self.assertTrue(repr(mock_logger_warning.call_args[0][1]).startswith('PgDistNode')) @patch.object(CitusHandler, 'is_alive', Mock(return_value=False)) @patch.object(CitusHandler, 'start', Mock()) def test_sync_pg_dist_node(self): with patch.object(CitusHandler, 'is_enabled', Mock(return_value=False)): self.c.sync_pg_dist_node(self.cluster) self.c.sync_pg_dist_node(self.cluster) def test_handle_event(self): self.c.handle_event(self.cluster, {}) with patch.object(CitusHandler, 'is_alive', Mock(return_value=True)): self.c.handle_event(self.cluster, {'type': 'after_promote', 'group': 2, 'leader': 'leader', 'timeout': 30, 'cooldown': 10}) def test_add_task(self): with patch('patroni.postgresql.citus.logger.error') as mock_logger, \ patch('patroni.postgresql.citus.urlparse', Mock(side_effect=Exception)): self.c.add_task('', 1, None) mock_logger.assert_called_once() with patch('patroni.postgresql.citus.logger.debug') as mock_logger: self.c.add_task('before_demote', 1, 'postgres://host:5432/postgres', 30) mock_logger.assert_called_once() self.assertTrue(mock_logger.call_args[0][0].startswith('Adding the new task:')) with patch('patroni.postgresql.citus.logger.debug') as mock_logger: self.c.add_task('before_promote', 1, 'postgres://host:5432/postgres', 30) mock_logger.assert_called_once() self.assertTrue(mock_logger.call_args[0][0].startswith('Overriding existing task:')) # add_task called from sync_pg_dist_node should not override already scheduled or in flight task until deadline 
self.assertIsNotNone(self.c.add_task('after_promote', 1, 'postgres://host:5432/postgres', 30)) self.assertIsNone(self.c.add_task('after_promote', 1, 'postgres://host:5432/postgres')) self.c._in_flight = self.c._tasks.pop() self.c._in_flight.deadline = self.c._in_flight.timeout + time.time() self.assertIsNone(self.c.add_task('after_promote', 1, 'postgres://host:5432/postgres')) self.c._in_flight.deadline = 0 self.assertIsNotNone(self.c.add_task('after_promote', 1, 'postgres://host:5432/postgres')) # If there is no transaction in progress and cached pg_dist_node matching desired state task should not be added self.c._schedule_load_pg_dist_node = False self.c._pg_dist_node[self.c._in_flight.group] = self.c._in_flight self.c._in_flight = None self.assertIsNone(self.c.add_task('after_promote', 1, 'postgres://host:5432/postgres')) def test_pick_task(self): self.c.add_task('after_promote', 1, 'postgres://host2:5432/postgres') with patch.object(CitusHandler, 'process_task') as mock_process_task: self.c.process_tasks() # process_task() shouln't be called because pick_task double checks with _pg_dist_node mock_process_task.assert_not_called() def test_process_task(self): self.c.add_task('after_promote', 0, 'postgres://host2:5432/postgres') task = self.c.add_task('before_promote', 1, 'postgres://host4:5432/postgres', 30) self.c.process_tasks() self.assertTrue(task._event.is_set()) # the after_promote should result only in COMMIT task = self.c.add_task('after_promote', 1, 'postgres://host4:5432/postgres', 30) with patch.object(CitusHandler, 'query') as mock_query: self.c.process_tasks() mock_query.assert_called_once() self.assertEqual(mock_query.call_args[0][0], 'COMMIT') def test_process_tasks(self): self.c.add_task('after_promote', 0, 'postgres://host2:5432/postgres') self.c.process_tasks() self.c.add_task('after_promote', 0, 'postgres://host3:5432/postgres') with patch('patroni.postgresql.citus.logger.error') as mock_logger, \ patch.object(CitusHandler, 'query', 
Mock(side_effect=Exception)): self.c.process_tasks() mock_logger.assert_called_once() self.assertTrue(mock_logger.call_args[0][0].startswith('Exception when working with pg_dist_node: ')) def test_on_demote(self): self.c.on_demote() @patch('patroni.postgresql.citus.logger.error') @patch.object(MockCursor, 'execute', Mock(side_effect=Exception)) def test_load_pg_dist_node(self, mock_logger): # load_pg_dist_node() triggers, query fails and exception is property handled self.c.process_tasks() self.assertTrue(self.c._schedule_load_pg_dist_node) mock_logger.assert_called_once() self.assertTrue(mock_logger.call_args[0][0].startswith('Exception when executing query')) self.assertTrue(mock_logger.call_args[0][1].startswith('SELECT nodeid, groupid, ')) def test_wait(self): task = self.c.add_task('before_demote', 1, 'postgres://host:5432/postgres', 30) task._event.wait = Mock() task.wait() def test_adjust_postgres_gucs(self): parameters = {'max_connections': 101, 'max_prepared_transactions': 0, 'shared_preload_libraries': 'foo , citus, bar '} self.c.adjust_postgres_gucs(parameters) self.assertEqual(parameters['max_prepared_transactions'], 202) self.assertEqual(parameters['shared_preload_libraries'], 'citus,foo,bar') self.assertEqual(parameters['wal_level'], 'logical') self.assertEqual(parameters['citus.local_hostname'], '/tmp') def test_bootstrap(self): self.c._config = None self.c.bootstrap() def test_ignore_replication_slot(self): self.assertFalse(self.c.ignore_replication_slot({'name': 'foo', 'type': 'physical', 'database': 'bar', 'plugin': 'wal2json'})) self.assertFalse(self.c.ignore_replication_slot({'name': 'foo', 'type': 'logical', 'database': 'bar', 'plugin': 'wal2json'})) self.assertFalse(self.c.ignore_replication_slot({'name': 'foo', 'type': 'logical', 'database': 'bar', 'plugin': 'pgoutput'})) self.assertFalse(self.c.ignore_replication_slot({'name': 'foo', 'type': 'logical', 'database': 'citus', 'plugin': 'pgoutput'})) 
self.assertTrue(self.c.ignore_replication_slot({'name': 'citus_shard_move_slot_1_2_3', 'type': 'logical', 'database': 'citus', 'plugin': 'pgoutput'})) self.assertFalse(self.c.ignore_replication_slot({'name': 'citus_shard_move_slot_1_2_3', 'type': 'logical', 'database': 'citus', 'plugin': 'citus'})) self.assertFalse(self.c.ignore_replication_slot({'name': 'citus_shard_split_slot_1_2_3', 'type': 'logical', 'database': 'citus', 'plugin': 'pgoutput'})) self.assertTrue(self.c.ignore_replication_slot({'name': 'citus_shard_split_slot_1_2_3', 'type': 'logical', 'database': 'citus', 'plugin': 'citus'})) @patch('patroni.postgresql.citus.logger.debug') @patch('patroni.postgresql.citus.connect', psycopg_connect) @patch('patroni.postgresql.citus.quote_ident', Mock()) def test_bootstrap_duplicate_database(self, mock_logger): with patch.object(MockCursor, 'execute', Mock(side_effect=ProgrammingError)): self.assertRaises(ProgrammingError, self.c.bootstrap) with patch.object(MockCursor, 'execute', Mock(side_effect=[ProgrammingError, None, None, None])), \ patch.object(ProgrammingError, 'diag') as mock_diag: type(mock_diag).sqlstate = PropertyMock(return_value='42P04') self.c.bootstrap() mock_logger.assert_called_once() self.assertTrue(mock_logger.call_args[0][0].startswith('Exception when creating database')) patroni-3.2.2/tests/test_config.py000066400000000000000000000276331455170150700172320ustar00rootroot00000000000000import os import sys import unittest import io from copy import deepcopy from mock import MagicMock, Mock, patch from patroni.config import Config, ConfigParseError, GlobalConfig class TestConfig(unittest.TestCase): @patch('os.path.isfile', Mock(return_value=True)) @patch('json.load', Mock(side_effect=Exception)) @patch('builtins.open', MagicMock()) def setUp(self): sys.argv = ['patroni.py'] os.environ[Config.PATRONI_CONFIG_VARIABLE] = 'restapi: {}\npostgresql: {data_dir: foo}' self.config = Config(None) def test_set_dynamic_configuration(self): with 
patch.object(Config, '_build_effective_configuration', Mock(side_effect=Exception)): self.assertFalse(self.config.set_dynamic_configuration({'foo': 'bar'})) self.assertTrue(self.config.set_dynamic_configuration({'standby_cluster': {}, 'postgresql': { 'parameters': {'cluster_name': 1, 'hot_standby': 1, 'wal_keep_size': 1, 'track_commit_timestamp': 1, 'wal_level': 1, 'max_connections': '100'}}})) def test_reload_local_configuration(self): os.environ.update({ 'PATRONI_NAME': 'postgres0', 'PATRONI_NAMESPACE': '/patroni/', 'PATRONI_SCOPE': 'batman2', 'PATRONI_LOGLEVEL': 'ERROR', 'PATRONI_LOG_LOGGERS': 'patroni.postmaster: WARNING, urllib3: DEBUG', 'PATRONI_LOG_FILE_NUM': '5', 'PATRONI_CITUS_DATABASE': 'citus', 'PATRONI_CITUS_GROUP': '0', 'PATRONI_CITUS_HOST': '0', 'PATRONI_RESTAPI_USERNAME': 'username', 'PATRONI_RESTAPI_PASSWORD': 'password', 'PATRONI_RESTAPI_LISTEN': '0.0.0.0:8008', 'PATRONI_RESTAPI_CONNECT_ADDRESS': '127.0.0.1:8008', 'PATRONI_RESTAPI_CERTFILE': '/certfile', 'PATRONI_RESTAPI_KEYFILE': '/keyfile', 'PATRONI_RESTAPI_ALLOWLIST_INCLUDE_MEMBERS': 'on', 'PATRONI_POSTGRESQL_LISTEN': '0.0.0.0:5432', 'PATRONI_POSTGRESQL_CONNECT_ADDRESS': '127.0.0.1:5432', 'PATRONI_POSTGRESQL_PROXY_ADDRESS': '127.0.0.1:5433', 'PATRONI_POSTGRESQL_DATA_DIR': 'data/postgres0', 'PATRONI_POSTGRESQL_CONFIG_DIR': 'data/postgres0', 'PATRONI_POSTGRESQL_PGPASS': '/tmp/pgpass0', 'PATRONI_ETCD_HOST': '127.0.0.1:2379', 'PATRONI_ETCD_URL': 'https://127.0.0.1:2379', 'PATRONI_ETCD_PROXY': 'http://127.0.0.1:2379', 'PATRONI_ETCD_SRV': 'test', 'PATRONI_ETCD_CACERT': '/cacert', 'PATRONI_ETCD_CERT': '/cert', 'PATRONI_ETCD_KEY': '/key', 'PATRONI_CONSUL_HOST': '127.0.0.1:8500', 'PATRONI_CONSUL_REGISTER_SERVICE': 'on', 'PATRONI_KUBERNETES_LABELS': 'a: b: c', 'PATRONI_KUBERNETES_SCOPE_LABEL': 'a', 'PATRONI_KUBERNETES_PORTS': '[{"name": "postgresql"}]', 'PATRONI_KUBERNETES_RETRIABLE_HTTP_CODES': '401', 'PATRONI_ZOOKEEPER_HOSTS': "'host1:2181','host2:2181'", 'PATRONI_EXHIBITOR_HOSTS': 'host1,host2', 
'PATRONI_EXHIBITOR_PORT': '8181', 'PATRONI_RAFT_PARTNER_ADDRS': "'host1:1234','host2:1234'", 'PATRONI_foo_HOSTS': '[host1,host2', # Exception in parse_list 'PATRONI_SUPERUSER_USERNAME': 'postgres', 'PATRONI_SUPERUSER_PASSWORD': 'zalando', 'PATRONI_REPLICATION_USERNAME': 'replicator', 'PATRONI_REPLICATION_PASSWORD': 'rep-pass', 'PATRONI_admin_PASSWORD': 'admin', 'PATRONI_admin_OPTIONS': 'createrole,createdb', 'PATRONI_POSTGRESQL_BIN_POSTGRES': 'sergtsop' }) config = Config('postgres0.yml') with patch.object(Config, '_load_config_file', Mock(return_value={'restapi': {}})): with patch.object(Config, '_build_effective_configuration', Mock(side_effect=Exception)): config.reload_local_configuration() self.assertTrue(config.reload_local_configuration()) self.assertIsNone(config.reload_local_configuration()) @patch('tempfile.mkstemp', Mock(return_value=[3000, 'blabla'])) @patch('os.path.exists', Mock(return_value=True)) @patch('os.remove', Mock(side_effect=IOError)) @patch('os.close', Mock(side_effect=IOError)) @patch('os.chmod', Mock()) @patch('shutil.move', Mock(return_value=None)) @patch('json.dump', Mock()) def test_save_cache(self): self.config.set_dynamic_configuration({'ttl': 30, 'postgresql': {'foo': 'bar'}}) with patch('os.fdopen', Mock(side_effect=IOError)): self.config.save_cache() with patch('os.fdopen', MagicMock()): self.config.save_cache() def test_standby_cluster_parameters(self): dynamic_configuration = { 'standby_cluster': { 'create_replica_methods': ['wal_e', 'basebackup'], 'host': 'localhost', 'port': 5432 } } self.config.set_dynamic_configuration(dynamic_configuration) for name, value in dynamic_configuration['standby_cluster'].items(): self.assertEqual(self.config['standby_cluster'][name], value) @patch('os.path.exists', Mock(return_value=True)) @patch('os.path.isfile', Mock(side_effect=lambda fname: fname != 'postgres0')) @patch('os.path.isdir', Mock(return_value=True)) @patch('os.listdir', Mock(return_value=['01-specific.yml', '00-base.yml'])) def 
test_configuration_directory(self): def open_mock(fname, *args, **kwargs): if fname.endswith('00-base.yml'): return io.StringIO( u''' test: True test2: child-1: somestring child-2: 5 child-3: False test3: True test4: - abc: 3 - abc: 4 ''') elif fname.endswith('01-specific.yml'): return io.StringIO( u''' test: False test2: child-2: 10 child-3: !!null test4: - ab: 5 new-attr: True ''') with patch('builtins.open', MagicMock(side_effect=open_mock)): config = Config('postgres0') self.assertEqual(config._local_configuration, {'test': False, 'test2': {'child-1': 'somestring', 'child-2': 10}, 'test3': True, 'test4': [{'ab': 5}], 'new-attr': True}) @patch('os.path.exists', Mock(return_value=True)) @patch('os.path.isfile', Mock(return_value=False)) @patch('os.path.isdir', Mock(return_value=False)) def test_invalid_path(self): self.assertRaises(ConfigParseError, Config, 'postgres0') @patch.object(Config, 'get') @patch('patroni.config.logger') def test__validate_failover_tags(self, mock_logger, mock_get): """Ensures that only one of `nofailover` or `failover_priority` can be provided""" config = Config("postgres0.yml") # Providing one of `nofailover` or `failover_priority` is fine for single_param in ({"nofailover": True}, {"failover_priority": 1}, {"failover_priority": 0}): mock_get.side_effect = [single_param] * 2 self.assertIsNone(config._validate_failover_tags()) mock_logger.warning.assert_not_called() # Providing both `nofailover` and `failover_priority` is fine if consistent for consistent_state in ( {"nofailover": False, "failover_priority": 1}, {"nofailover": True, "failover_priority": 0}, {"nofailover": "False", "failover_priority": 0} ): mock_get.side_effect = [consistent_state] * 2 self.assertIsNone(config._validate_failover_tags()) mock_logger.warning.assert_not_called() # Providing both inconsistently should log a warning for inconsistent_state in ( {"nofailover": False, "failover_priority": 0}, {"nofailover": True, "failover_priority": 1}, {"nofailover": "False", 
"failover_priority": 1}, {"nofailover": "", "failover_priority": 0} ): mock_get.side_effect = [inconsistent_state] * 2 self.assertIsNone(config._validate_failover_tags()) mock_logger.warning.assert_called_once_with( 'Conflicting configuration between nofailover: %s and failover_priority: %s.' + ' Defaulting to nofailover: %s', inconsistent_state['nofailover'], inconsistent_state['failover_priority'], inconsistent_state['nofailover']) mock_logger.warning.reset_mock() def test__process_postgresql_parameters(self): expected_params = { 'f.oo': 'bar', # not in ConfigHandler.CMDLINE_OPTIONS 'max_connections': 100, # IntValidator 'wal_keep_size': '128MB', # IntValidator 'wal_level': 'hot_standby', # EnumValidator } input_params = deepcopy(expected_params) input_params['max_connections'] = '100' self.assertEqual(self.config._process_postgresql_parameters(input_params), expected_params) expected_params['f.oo'] = input_params['f.oo'] = '100' self.assertEqual(self.config._process_postgresql_parameters(input_params), expected_params) input_params['wal_level'] = 'cold_standby' expected_params.pop('wal_level') self.assertEqual(self.config._process_postgresql_parameters(input_params), expected_params) input_params['max_connections'] = 10 expected_params.pop('max_connections') self.assertEqual(self.config._process_postgresql_parameters(input_params), expected_params) def test__validate_and_adjust_timeouts(self): with patch('patroni.config.logger.warning') as mock_logger: self.config._validate_and_adjust_timeouts({'ttl': 15}) self.assertEqual(mock_logger.call_args_list[0][0], ("%s=%d can't be smaller than %d, adjusting...", 'ttl', 15, 20)) with patch('patroni.config.logger.warning') as mock_logger: self.config._validate_and_adjust_timeouts({'loop_wait': 0}) self.assertEqual(mock_logger.call_args_list[0][0], ("%s=%d can't be smaller than %d, adjusting...", 'loop_wait', 0, 1)) with patch('patroni.config.logger.warning') as mock_logger: 
self.config._validate_and_adjust_timeouts({'retry_timeout': 1}) self.assertEqual(mock_logger.call_args_list[0][0], ("%s=%d can't be smaller than %d, adjusting...", 'retry_timeout', 1, 3)) with patch('patroni.config.logger.warning') as mock_logger: self.config._validate_and_adjust_timeouts({'ttl': 20, 'loop_wait': 11, 'retry_timeout': 5}) self.assertEqual(mock_logger.call_args_list[0][0], ('Violated the rule "loop_wait + 2*retry_timeout <= ttl", where ttl=%d ' 'and retry_timeout=%d. Adjusting loop_wait from %d to %d', 20, 5, 11, 10)) with patch('patroni.config.logger.warning') as mock_logger: self.config._validate_and_adjust_timeouts({'ttl': 20, 'loop_wait': 10, 'retry_timeout': 10}) self.assertEqual(mock_logger.call_args_list[0][0], ('Violated the rule "loop_wait + 2*retry_timeout <= ttl", where ttl=%d. Adjusting' ' loop_wait from %d to %d and retry_timeout from %d to %d', 20, 10, 1, 10, 9)) def test_global_config_is_synchronous_mode(self): # we should ignore synchronous_mode setting in a standby cluster config = {'standby_cluster': {'host': 'some_host'}, 'synchronous_mode': True} self.assertFalse(GlobalConfig(config).is_synchronous_mode) patroni-3.2.2/tests/test_config_generator.py000066400000000000000000000400211455170150700212620ustar00rootroot00000000000000import os import psutil import unittest import yaml from . import MockConnect, MockCursor, MockConnectionInfo from copy import deepcopy from mock import MagicMock, Mock, PropertyMock, mock_open as _mock_open, patch from patroni.__main__ import main as _main from patroni.config import Config from patroni.config_generator import AbstractConfigGenerator, get_address, NO_VALUE_MSG from patroni.log import PatroniLogger from patroni.utils import patch_config from . 
import psycopg_connect HOSTNAME = 'test_hostname' IP = '1.9.8.4' def mock_open(*args, **kwargs): ret = _mock_open(*args, **kwargs) ret.return_value.__iter__ = lambda o: iter(o.readline, '') if not kwargs.get('read_data'): ret.return_value.readline = Mock(return_value=None) return ret @patch('patroni.psycopg.connect', psycopg_connect) @patch('builtins.open', MagicMock()) @patch('subprocess.check_output', Mock(return_value=b"postgres (PostgreSQL) 16.2")) @patch('psutil.Process.exe', Mock(return_value='/bin/dir/from/running/postgres')) @patch('psutil.Process.__init__', Mock(return_value=None)) @patch.object(AbstractConfigGenerator, '_HOSTNAME', HOSTNAME) @patch.object(AbstractConfigGenerator, '_IP', IP) class TestGenerateConfig(unittest.TestCase): def setUp(self): os.environ['PATRONI_SCOPE'] = 'scope_from_env' os.environ['PATRONI_POSTGRESQL_BIN_DIR'] = '/bin/from/env' os.environ['PATRONI_SUPERUSER_USERNAME'] = 'su_user_from_env' os.environ['PATRONI_SUPERUSER_PASSWORD'] = 'su_pwd_from_env' os.environ['PATRONI_REPLICATION_USERNAME'] = 'repl_user_from_env' os.environ['PATRONI_REPLICATION_PASSWORD'] = 'repl_pwd_from_env' os.environ['PATRONI_REWIND_USERNAME'] = 'rewind_user_from_env' os.environ['PGUSER'] = 'pguser_from_env' os.environ['PGPASSWORD'] = 'pguser_pwd_from_env' os.environ['PATRONI_RESTAPI_CONNECT_ADDRESS'] = 'localhost:8080' os.environ['PATRONI_RESTAPI_LISTEN'] = 'localhost:8080' os.environ['PATRONI_POSTGRESQL_BIN_POSTGRES'] = 'custom_postgres_bin_from_env' self.environ = deepcopy(os.environ) dynamic_config = Config.get_default_config() dynamic_config['postgresql']['parameters'] = dict(dynamic_config['postgresql']['parameters']) del dynamic_config['standby_cluster'] dynamic_config['postgresql']['parameters']['wal_keep_segments'] = 8 dynamic_config['postgresql']['use_pg_rewind'] = True self.config = { 'scope': self.environ['PATRONI_SCOPE'], 'name': HOSTNAME, 'log': { 'level': PatroniLogger.DEFAULT_LEVEL, 'traceback_level': PatroniLogger.DEFAULT_TRACEBACK_LEVEL, 
'format': PatroniLogger.DEFAULT_FORMAT, 'max_queue_size': PatroniLogger.DEFAULT_MAX_QUEUE_SIZE }, 'restapi': { 'connect_address': self.environ['PATRONI_RESTAPI_CONNECT_ADDRESS'], 'listen': self.environ['PATRONI_RESTAPI_LISTEN'] }, 'bootstrap': { 'dcs': dynamic_config }, 'postgresql': { 'connect_address': IP + ':5432', 'data_dir': NO_VALUE_MSG, 'listen': IP + ':5432', 'pg_hba': ['host all all all md5', f'host replication {self.environ["PATRONI_REPLICATION_USERNAME"]} all md5'], 'authentication': {'superuser': {'username': self.environ['PATRONI_SUPERUSER_USERNAME'], 'password': self.environ['PATRONI_SUPERUSER_PASSWORD']}, 'replication': {'username': self.environ['PATRONI_REPLICATION_USERNAME'], 'password': self.environ['PATRONI_REPLICATION_PASSWORD']}, 'rewind': {'username': self.environ['PATRONI_REWIND_USERNAME']}}, 'bin_dir': self.environ['PATRONI_POSTGRESQL_BIN_DIR'], 'bin_name': {'postgres': self.environ['PATRONI_POSTGRESQL_BIN_POSTGRES']}, 'parameters': {'password_encryption': 'md5'} } } def _set_running_instance_config_vals(self): # values are taken from tests/__init__.py conf = { 'scope': 'my_cluster', 'bootstrap': { 'dcs': { 'postgresql': { 'parameters': { 'max_connections': 42, 'max_locks_per_transaction': 73, 'max_replication_slots': 21, 'max_wal_senders': 37, 'wal_level': 'replica', 'wal_keep_segments': None }, 'use_pg_rewind': None } } }, 'postgresql': { 'connect_address': f'{IP}:bar', 'listen': '6.6.6.6:1984', 'data_dir': 'data', 'bin_dir': '/bin/dir/from/running', 'parameters': { 'archive_command': 'my archive command', 'hba_file': os.path.join('data', 'pg_hba.conf'), 'ident_file': os.path.join('data', 'pg_ident.conf'), 'password_encryption': None }, 'authentication': { 'superuser': { 'username': 'foobar', 'password': 'qwerty', 'channel_binding': 'prefer', 'gssencmode': 'prefer', 'sslmode': 'prefer' }, 'replication': { 'username': NO_VALUE_MSG, 'password': NO_VALUE_MSG }, 'rewind': None }, }, 'tags': { 'failover_priority': 1, 'noloadbalance': False, 
'clonefrom': True, 'nosync': False, } } patch_config(self.config, conf) def _get_running_instance_open_res(self): hba_content = '\n'.join(self.config['postgresql']['pg_hba'] + ['#host all all all md5', ' host all all all md5', '', 'hostall all all md5']) ident_content = '\n'.join(['# something very interesting', ' ']) self.config['postgresql']['pg_hba'] += ['host all all all md5'] return [ mock_open(read_data=hba_content)(), mock_open(read_data=ident_content)(), mock_open(read_data='1984')(), mock_open()() ] @patch('os.makedirs') def test_generate_sample_config_pre_13_dir_creation(self, mock_makedir): with patch('sys.argv', ['patroni.py', '--generate-sample-config', '/foo/bar.yml']), \ patch('subprocess.check_output', Mock(return_value=b"postgres (PostgreSQL) 9.4.3")) as pg_bin_mock, \ patch('builtins.open', _mock_open()) as mocked_file, \ self.assertRaises(SystemExit) as e: _main() self.assertEqual(self.config, yaml.safe_load(mocked_file().write.call_args_list[0][0][0])) self.assertEqual(e.exception.code, 0) mock_makedir.assert_called_once() pg_bin_mock.assert_called_once_with([os.path.join(self.environ['PATRONI_POSTGRESQL_BIN_DIR'], self.environ['PATRONI_POSTGRESQL_BIN_POSTGRES']), '--version']) @patch('os.makedirs', Mock()) def test_generate_sample_config_16(self): conf = { 'bootstrap': { 'dcs': { 'postgresql': { 'parameters': { 'wal_keep_size': '128MB', 'wal_keep_segments': None }, } } }, 'postgresql': { 'parameters': { 'password_encryption': 'scram-sha-256' }, 'pg_hba': ['host all all all scram-sha-256', f'host replication {self.environ["PATRONI_REPLICATION_USERNAME"]} all scram-sha-256'], 'authentication': { 'rewind': { 'username': self.environ['PATRONI_REWIND_USERNAME'], 'password': NO_VALUE_MSG} }, } } patch_config(self.config, conf) with patch('sys.argv', ['patroni.py', '--generate-sample-config', '/foo/bar.yml']), \ patch('builtins.open', _mock_open()) as mocked_file, \ self.assertRaises(SystemExit) as e: _main() self.assertEqual(self.config, 
yaml.safe_load(mocked_file().write.call_args_list[0][0][0])) self.assertEqual(e.exception.code, 0) @patch('os.makedirs', Mock()) @patch('sys.stdout') def test_generate_config_running_instance_16(self, mock_sys_stdout): self._set_running_instance_config_vals() with patch('builtins.open', Mock(side_effect=self._get_running_instance_open_res())), \ patch('sys.argv', ['patroni.py', '--generate-config', '--dsn', 'host=foo port=bar user=foobar password=qwerty']), \ self.assertRaises(SystemExit) as e: _main() self.assertEqual(e.exception.code, 0) self.assertEqual(self.config, yaml.safe_load(mock_sys_stdout.write.call_args_list[0][0][0])) @patch('os.makedirs', Mock()) @patch('sys.stdout') def test_generate_config_running_instance_16_connect_from_env(self, mock_sys_stdout): self._set_running_instance_config_vals() # su auth params and connect host from env os.environ['PGCHANNELBINDING'] = \ self.config['postgresql']['authentication']['superuser']['channel_binding'] = 'disable' conf = { 'scope': 'my_cluster', 'bootstrap': { 'dcs': { 'postgresql': { 'parameters': { 'max_connections': 42, 'max_locks_per_transaction': 73, 'max_replication_slots': 21, 'max_wal_senders': 37, 'wal_level': 'replica', 'wal_keep_segments': None }, 'use_pg_rewind': None } } }, 'postgresql': { 'connect_address': f'{IP}:1984', 'authentication': { 'superuser': { 'username': self.environ['PGUSER'], 'password': self.environ['PGPASSWORD'], 'gssencmode': None, 'sslmode': None }, }, } } patch_config(self.config, conf) with patch('builtins.open', Mock(side_effect=self._get_running_instance_open_res())), \ patch('sys.argv', ['patroni.py', '--generate-config']), \ patch.object(MockConnect, 'server_version', PropertyMock(return_value=160000)), \ self.assertRaises(SystemExit) as e: _main() self.assertEqual(e.exception.code, 0) self.assertEqual(self.config, yaml.safe_load(mock_sys_stdout.write.call_args_list[0][0][0])) def test_generate_config_running_instance_errors(self): # 1. 
Wrong DSN format with patch('sys.argv', ['patroni.py', '--generate-config', '--dsn', 'host:foo port:bar user:foobar']), \ self.assertRaises(SystemExit) as e: _main() self.assertIn('Failed to parse DSN string', e.exception.code) # 2. User is not a superuser with patch('sys.argv', ['patroni.py', '--generate-config', '--dsn', 'host=foo port=bar user=foobar password=pwd_from_dsn']), \ patch.object(MockCursor, 'rowcount', PropertyMock(return_value=0), create=True), \ patch.object(MockConnectionInfo, 'parameter_status', Mock(return_value='off')), \ self.assertRaises(SystemExit) as e: _main() self.assertIn('The provided user does not have superuser privilege', e.exception.code) # 3. Error while calling postgres --version with patch('subprocess.check_output', Mock(side_effect=OSError)), \ patch('sys.argv', ['patroni.py', '--generate-sample-config']), \ self.assertRaises(SystemExit) as e: _main() self.assertIn('Failed to get postgres version:', e.exception.code) with patch('sys.argv', ['patroni.py', '--generate-config']): # 4. empty postmaster.pid with patch('builtins.open', Mock(side_effect=[mock_open(read_data='hba_content')(), mock_open(read_data='ident_content')(), mock_open(read_data='')()])), \ self.assertRaises(SystemExit) as e: _main() self.assertIn('Failed to obtain postmaster pid from postmaster.pid file', e.exception.code) # 5. Failed to open postmaster.pid with patch('builtins.open', Mock(side_effect=[mock_open(read_data='hba_content')(), mock_open(read_data='ident_content')(), OSError])), \ self.assertRaises(SystemExit) as e: _main() self.assertIn('Error while reading postmaster.pid file', e.exception.code) # 6. 
Invalid postmaster pid with patch('builtins.open', Mock(side_effect=[mock_open(read_data='hba_content')(), mock_open(read_data='ident_content')(), mock_open(read_data='1984')()])), \ patch('psutil.Process.__init__', Mock(return_value=None)), \ patch('psutil.Process.exe', Mock(side_effect=psutil.NoSuchProcess(1984))), \ self.assertRaises(SystemExit) as e: _main() self.assertIn("Obtained postmaster pid doesn't exist", e.exception.code) # 7. Failed to open pg_hba with patch('builtins.open', Mock(side_effect=OSError)), \ self.assertRaises(SystemExit) as e: _main() self.assertIn('Failed to read pg_hba.conf', e.exception.code) # 8. Failed to open pg_ident with patch('builtins.open', Mock(side_effect=[mock_open(read_data='hba_content')(), OSError])), \ self.assertRaises(SystemExit) as e: _main() self.assertIn('Failed to read pg_ident.conf', e.exception.code) # 9. Failed PG connecttion from . import psycopg with patch('patroni.psycopg.connect', side_effect=psycopg.Error), \ self.assertRaises(SystemExit) as e: _main() self.assertIn('Failed to establish PostgreSQL connection', e.exception.code) # 10. 
An unexpected error with patch.object(AbstractConfigGenerator, '__init__', side_effect=psycopg.Error), \ self.assertRaises(SystemExit) as e: _main() self.assertIn('Unexpected exception', e.exception.code) def test_get_address(self): with patch('socket.getaddrinfo', Mock(side_effect=Exception)), \ patch('logging.warning') as mock_warning: self.assertEqual(get_address(), (NO_VALUE_MSG, NO_VALUE_MSG)) self.assertIn('Failed to obtain address: %r', mock_warning.call_args_list[0][0]) patroni-3.2.2/tests/test_consul.py000066400000000000000000000374011455170150700172620ustar00rootroot00000000000000import consul import unittest from consul import ConsulException, NotFound from mock import Mock, PropertyMock, patch from patroni.dcs.consul import AbstractDCS, Cluster, Consul, ConsulInternalError, \ ConsulError, ConsulClient, HTTPClient, InvalidSessionTTL, InvalidSession, RetryFailedError from . import SleepException def kv_get(self, key, **kwargs): if key == 'service/test/members/postgresql1': return '1', {'Session': 'fd4f44fe-2cac-bba5-a60b-304b51ff39b7'} if key == 'service/test/': return None, None if key == 'service/good/leader': return '1', None if key == 'service/good/sync': return '1', {'ModifyIndex': 1, 'Value': b'{}'} good_cls = ('6429', [{'CreateIndex': 1334, 'Flags': 0, 'Key': key + 'failover', 'LockIndex': 0, 'ModifyIndex': 1334, 'Value': b''}, {'CreateIndex': 1334, 'Flags': 0, 'Key': key + '1/initialize', 'LockIndex': 0, 'ModifyIndex': 1334, 'Value': b'postgresql0'}, {'CreateIndex': 1334, 'Flags': 0, 'Key': key + 'initialize', 'LockIndex': 0, 'ModifyIndex': 1334, 'Value': b'postgresql0'}, {'CreateIndex': 2621, 'Flags': 0, 'Key': key + 'leader', 'LockIndex': 1, 'ModifyIndex': 2621, 'Session': 'fd4f44fe-2cac-bba5-a60b-304b51ff39b7', 'Value': b'postgresql1'}, {'CreateIndex': 6156, 'Flags': 0, 'Key': key + 'members/postgresql0', 'LockIndex': 1, 'ModifyIndex': 6156, 'Session': '782e6da4-ed02-3aef-7963-99a90ed94b53', 'Value': 
('postgres://replicator:rep-pass@127.0.0.1:5432/postgres' + '?application_name=http://127.0.0.1:8008/patroni').encode('utf-8')}, {'CreateIndex': 2630, 'Flags': 0, 'Key': key + 'members/postgresql1', 'LockIndex': 1, 'ModifyIndex': 2630, 'Session': 'fd4f44fe-2cac-bba5-a60b-304b51ff39b7', 'Value': ('postgres://replicator:rep-pass@127.0.0.1:5433/postgres' + '?application_name=http://127.0.0.1:8009/patroni').encode('utf-8')}, {'CreateIndex': 1085, 'Flags': 0, 'Key': key + 'optime/leader', 'LockIndex': 0, 'ModifyIndex': 6429, 'Value': b'4496294792'}, {'CreateIndex': 1085, 'Flags': 0, 'Key': key + 'sync', 'LockIndex': 0, 'ModifyIndex': 6429, 'Value': b'{"leader": "leader", "sync_standby": null}'}, {'CreateIndex': 1085, 'Flags': 0, 'Key': key + 'failsafe', 'LockIndex': 0, 'ModifyIndex': 6429, 'Value': b'{'}, {'CreateIndex': 1085, 'Flags': 0, 'Key': key + 'status', 'LockIndex': 0, 'ModifyIndex': 6429, 'Value': b'{"optime":4496294792, "slots":{"ls":12345}}'}]) if key == 'service/good/': return good_cls if key == 'service/broken/': good_cls[1][-1]['Value'] = b'{' return good_cls if key == 'service/legacy/': good_cls[1].pop() return good_cls raise ConsulException class TestHTTPClient(unittest.TestCase): def setUp(self): c = ConsulClient() self.client = c.http self.client.http.request = Mock() def test_get(self): self.client.get(Mock(), '') self.client.get(Mock(), '', {'wait': '1s', 'index': 1, 'token': 'foo'}) self.client.http.request.return_value.status = 500 self.client.http.request.return_value.data = b'Foo' self.assertRaises(ConsulInternalError, self.client.get, Mock(), '') self.client.http.request.return_value.data = b"Invalid Session TTL '3000000000', must be between [10s=24h0m0s]" self.assertRaises(InvalidSessionTTL, self.client.get, Mock(), '') self.client.http.request.return_value.data = b"invalid session '16492f43-c2d6-5307-432f-e32d6f7bcbd0'" self.assertRaises(InvalidSession, self.client.get, Mock(), '') def test_unknown_method(self): try: self.client.bla(Mock(), 
'') self.assertFail() except Exception as e: self.assertTrue(isinstance(e, AttributeError)) def test_put(self): self.client.put(Mock(), '/v1/session/create') self.client.put(Mock(), '/v1/session/create', params=[], data='{"foo": "bar"}') @patch.object(consul.Consul.KV, 'get', kv_get) class TestConsul(unittest.TestCase): @patch.object(consul.Consul.Session, 'create', Mock(return_value='fd4f44fe-2cac-bba5-a60b-304b51ff39b7')) @patch.object(consul.Consul.Session, 'renew', Mock(side_effect=NotFound)) @patch.object(consul.Consul.KV, 'get', kv_get) @patch.object(consul.Consul.KV, 'delete', Mock()) def setUp(self): Consul({'ttl': 30, 'scope': 't', 'name': 'p', 'url': 'https://l:1', 'retry_timeout': 10, 'verify': 'on', 'key': 'foo', 'cert': 'bar', 'cacert': 'buz', 'token': 'asd', 'dc': 'dc1', 'register_service': True}) Consul({'ttl': 30, 'scope': 't_', 'name': 'p', 'url': 'https://l:1', 'retry_timeout': 10, 'verify': 'on', 'cert': 'bar', 'cacert': 'buz', 'register_service': True}) self.c = Consul({'ttl': 30, 'scope': 'test', 'name': 'postgresql1', 'host': 'localhost:1', 'retry_timeout': 10, 'register_service': True, 'service_check_tls_server_name': True}) self.c._base_path = 'service/good' self.c.get_cluster() @patch('time.sleep', Mock(side_effect=SleepException)) @patch.object(consul.Consul.Session, 'create', Mock(side_effect=ConsulException)) def test_create_session(self): self.c._session = None self.assertRaises(SleepException, self.c.create_session) @patch.object(consul.Consul.Session, 'renew', Mock(side_effect=NotFound)) @patch.object(consul.Consul.Session, 'create', Mock(side_effect=[InvalidSessionTTL, ConsulException])) @patch.object(consul.Consul.Agent, 'self', Mock(return_value={'Config': {'SessionTTLMin': 0}})) @patch.object(HTTPClient, 'set_ttl', Mock(side_effect=ValueError)) def test_referesh_session(self): self.c._session = '1' self.assertFalse(self.c.refresh_session()) self.c._last_session_refresh = 0 self.assertRaises(ConsulError, self.c.refresh_session) 
@patch.object(consul.Consul.KV, 'delete', Mock()) def test_get_cluster(self): self.c._base_path = 'service/test' self.assertIsInstance(self.c.get_cluster(), Cluster) self.assertIsInstance(self.c.get_cluster(), Cluster) self.c._base_path = 'service/fail' self.assertRaises(ConsulError, self.c.get_cluster) self.c._base_path = 'service/broken' self.assertIsInstance(self.c.get_cluster(), Cluster) self.c._base_path = 'service/legacy' self.assertIsInstance(self.c.get_cluster(), Cluster) def test__get_citus_cluster(self): self.c._citus_group = '0' cluster = self.c.get_cluster() self.assertIsInstance(cluster, Cluster) self.assertIsInstance(cluster.workers[1], Cluster) @patch.object(consul.Consul.KV, 'delete', Mock(side_effect=[ConsulException, True, True, True])) @patch.object(consul.Consul.KV, 'put', Mock(side_effect=[True, ConsulException, InvalidSession])) def test_touch_member(self): self.c.refresh_session = Mock(return_value=False) with patch.object(Consul, 'update_service', Mock(side_effect=Exception)): self.c.touch_member({'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5433/postgres', 'api_url': 'http://127.0.0.1:8009/patroni'}) self.c._register_service = True self.c.refresh_session = Mock(return_value=True) for _ in range(0, 4): self.c.touch_member({'balbla': 'blabla'}) self.c.refresh_session = Mock(side_effect=ConsulError('foo')) self.assertFalse(self.c.touch_member({'balbla': 'blabla'})) @patch.object(consul.Consul.KV, 'put', Mock(side_effect=[InvalidSession, False, InvalidSession])) def test_take_leader(self): self.c.set_ttl(20) self.c._do_refresh_session = Mock() self.assertFalse(self.c.take_leader()) with patch('time.time', Mock(side_effect=[0, 100])): self.assertRaises(ConsulError, self.c.take_leader) with patch('time.time', Mock(side_effect=[0, 0, 0, 0, 0, 0, 100])): self.assertRaises(ConsulError, self.c.take_leader) @patch.object(consul.Consul.KV, 'put', Mock(return_value=True)) def test_set_failover_value(self): self.c.set_failover_value('') 
@patch.object(consul.Consul.KV, 'put', Mock(return_value=True)) def test_set_config_value(self): self.c.set_config_value('') @patch.object(Cluster, 'min_version', PropertyMock(return_value=(2, 0))) @patch.object(consul.Consul.KV, 'put', Mock(side_effect=ConsulException)) def test_write_leader_optime(self): self.c.get_cluster() self.c.write_leader_optime('1') @patch.object(consul.Consul.Session, 'renew') @patch.object(consul.Consul.KV, 'put', Mock(side_effect=ConsulException)) def test_update_leader(self, mock_renew): leader = self.c.get_cluster().leader self.c._session = 'fd4f44fe-2cac-bba5-a60b-304b51ff39b8' with patch.object(consul.Consul.KV, 'delete', Mock(return_value=True)): with patch.object(consul.Consul.KV, 'put', Mock(return_value=True)): self.assertTrue(self.c.update_leader(leader, 12345, failsafe={'foo': 'bar'})) with patch.object(consul.Consul.KV, 'put', Mock(side_effect=ConsulException)): self.assertFalse(self.c.update_leader(leader, 12345)) with patch('time.time', Mock(side_effect=[0, 0, 0, 0, 100, 200, 300])): self.assertRaises(ConsulError, self.c.update_leader, leader, 12345) with patch('time.time', Mock(side_effect=[0, 100, 200, 300])): self.assertRaises(ConsulError, self.c.update_leader, leader, 12345) with patch.object(consul.Consul.KV, 'delete', Mock(side_effect=ConsulException)): self.assertFalse(self.c.update_leader(leader, 12347)) mock_renew.side_effect = RetryFailedError('') self.c._last_session_refresh = 0 self.assertRaises(ConsulError, self.c.update_leader, leader, 12346) mock_renew.side_effect = ConsulException self.assertFalse(self.c.update_leader(leader, 12347)) @patch.object(consul.Consul.KV, 'delete', Mock(return_value=True)) def test_delete_leader(self): leader = self.c.get_cluster().leader self.c.delete_leader(leader) self.c._name = 'other' self.c.delete_leader(leader) @patch.object(consul.Consul.KV, 'put', Mock(return_value=True)) def test_initialize(self): self.c.initialize() @patch.object(consul.Consul.KV, 'delete', 
Mock(return_value=True)) def test_cancel_initialization(self): self.c.cancel_initialization() @patch.object(consul.Consul.KV, 'delete', Mock(return_value=True)) def test_delete_cluster(self): self.c.delete_cluster() @patch.object(AbstractDCS, 'watch', Mock()) def test_watch(self): self.c.watch(None, 1) self.c._name = '' self.c.watch(6429, 1) with patch.object(consul.Consul.KV, 'get', Mock(side_effect=ConsulException)): self.c.watch(6429, 1) def test_set_retry_timeout(self): self.c.set_retry_timeout(10) @patch.object(consul.Consul.KV, 'delete', Mock(return_value=True)) @patch.object(consul.Consul.KV, 'put', Mock(return_value=True)) def test_sync_state(self): self.assertEqual(self.c.set_sync_state_value('{}'), 1) with patch('time.time', Mock(side_effect=[1, 100, 1000])): self.assertFalse(self.c.set_sync_state_value('{}')) with patch.object(consul.Consul.KV, 'put', Mock(return_value=False)): self.assertFalse(self.c.set_sync_state_value('{}')) self.assertTrue(self.c.delete_sync_state()) @patch.object(consul.Consul.KV, 'put', Mock(return_value=True)) def test_set_history_value(self): self.assertTrue(self.c.set_history_value('{}')) @patch.object(consul.Consul.Agent.Service, 'register', Mock(side_effect=(False, True, True, True))) @patch.object(consul.Consul.Agent.Service, 'deregister', Mock(return_value=True)) def test_update_service(self): d = {'role': 'replica', 'api_url': 'http://a/t', 'conn_url': 'pg://c:1', 'state': 'running'} self.assertIsNone(self.c.update_service({}, {})) self.assertFalse(self.c.update_service({}, d)) self.assertTrue(self.c.update_service(d, d)) self.assertIsNone(self.c.update_service(d, d)) d['state'] = 'stopped' self.assertTrue(self.c.update_service(d, d, force=True)) d['state'] = 'unknown' self.assertIsNone(self.c.update_service({}, d)) d['state'] = 'running' d['role'] = 'bla' self.assertIsNone(self.c.update_service({}, d)) for role in ('master', 'primary'): d['role'] = role self.assertTrue(self.c.update_service({}, d)) 
@patch.object(consul.Consul.KV, 'put', Mock(side_effect=ConsulException)) def test_reload_config(self): self.assertEqual([], self.c._service_tags) self.c.reload_config({'consul': {'token': 'foo', 'register_service': True, 'service_tags': ['foo']}, 'loop_wait': 10, 'ttl': 30, 'retry_timeout': 10}) self.assertEqual(["foo"], self.c._service_tags) self.c.refresh_session = Mock(return_value=False) d = {'role': 'replica', 'api_url': 'http://a/t', 'conn_url': 'pg://c:1', 'state': 'running'} # Changing register_service from True to False calls deregister() self.c.reload_config({'consul': {'register_service': False}, 'loop_wait': 10, 'ttl': 30, 'retry_timeout': 10}) with patch('consul.Consul.Agent.Service.deregister') as mock_deregister: self.c.touch_member(d) mock_deregister.assert_called_once() self.assertEqual([], self.c._service_tags) # register_service staying False between reloads does not call deregister() self.c.reload_config({'consul': {'register_service': False}, 'loop_wait': 10, 'ttl': 30, 'retry_timeout': 10}) with patch('consul.Consul.Agent.Service.deregister') as mock_deregister: self.c.touch_member(d) self.assertFalse(mock_deregister.called) # Changing register_service from False to True calls register() self.c.reload_config({'consul': {'register_service': True}, 'loop_wait': 10, 'ttl': 30, 'retry_timeout': 10}) with patch('consul.Consul.Agent.Service.register') as mock_register: self.c.touch_member(d) mock_register.assert_called_once() # register_service staying True between reloads does not call register() self.c.reload_config({'consul': {'register_service': True}, 'loop_wait': 10, 'ttl': 30, 'retry_timeout': 10}) with patch('consul.Consul.Agent.Service.register') as mock_register: self.c.touch_member(d) self.assertFalse(mock_deregister.called) # register_service staying True between reloads does calls register() if other service data has changed self.c.reload_config({'consul': {'register_service': True}, 'loop_wait': 10, 'ttl': 30, 'retry_timeout': 10}) 
with patch('consul.Consul.Agent.Service.register') as mock_register: self.c.touch_member(d) mock_register.assert_called_once() # register_service staying True between reloads does calls register() if service_tags have changed self.c.reload_config({'consul': {'register_service': True, 'service_tags': ['foo']}, 'loop_wait': 10, 'ttl': 30, 'retry_timeout': 10}) with patch('consul.Consul.Agent.Service.register') as mock_register: self.c.touch_member(d) mock_register.assert_called_once() patroni-3.2.2/tests/test_ctl.py000066400000000000000000001214411455170150700165370ustar00rootroot00000000000000import etcd import mock import os import unittest from click.testing import CliRunner from datetime import datetime, timedelta from mock import patch, Mock, PropertyMock from patroni.ctl import ctl, load_config, output_members, get_dcs, parse_dcs, \ get_all_members, get_any_member, get_cursor, query_member, PatroniCtlException, apply_config_changes, \ format_config_for_editing, show_diff, invoke_editor, format_pg_version, CONFIG_FILE_PATH, PatronictlPrettyTable from patroni.dcs.etcd import AbstractEtcdClientWithFailover, Cluster, Failover from patroni.psycopg import OperationalError from patroni.utils import tzutc from prettytable import PrettyTable, ALL from urllib3 import PoolManager from . 
import MockConnect, MockCursor, MockResponse, psycopg_connect from .test_etcd import etcd_read, socket_getaddrinfo from .test_ha import get_cluster_initialized_without_leader, get_cluster_initialized_with_leader, \ get_cluster_initialized_with_only_leader, get_cluster_not_initialized_without_leader, get_cluster, Member DEFAULT_CONFIG = { 'scope': 'alpha', 'restapi': {'listen': '::', 'certfile': 'a'}, 'ctl': {'certfile': 'a'}, 'etcd': {'host': 'localhost:2379'}, 'citus': {'database': 'citus', 'group': 0}, 'postgresql': {'data_dir': '.', 'pgpass': './pgpass', 'parameters': {}, 'retry_timeout': 5} } @patch('patroni.ctl.load_config', Mock(return_value=DEFAULT_CONFIG)) class TestCtl(unittest.TestCase): TEST_ROLES = ('master', 'primary', 'leader') @patch('socket.getaddrinfo', socket_getaddrinfo) @patch.object(AbstractEtcdClientWithFailover, '_get_machines_list', Mock(return_value=['http://remotehost:2379'])) def setUp(self): self.runner = CliRunner() self.e = get_dcs({'etcd': {'ttl': 30, 'host': 'ok:2379', 'retry_timeout': 10}, 'citus': {'group': 0}}, 'foo', None) @patch('patroni.ctl.logging.debug') def test_load_config(self, mock_logger_debug): runner = CliRunner() with runner.isolated_filesystem(): self.assertRaises(PatroniCtlException, load_config, './non-existing-config-file', None) with patch('os.path.exists', Mock(return_value=True)), \ patch('patroni.config.Config._load_config_path', Mock(return_value={})): load_config(CONFIG_FILE_PATH, None) mock_logger_debug.assert_called_once() self.assertEqual(('Ignoring configuration file "%s". 
It does not exists or is not readable.', CONFIG_FILE_PATH), mock_logger_debug.call_args[0]) mock_logger_debug.reset_mock() with patch('os.access', Mock(return_value=True)): load_config(CONFIG_FILE_PATH, '') mock_logger_debug.assert_called_once() self.assertEqual(('Loading configuration from file %s', CONFIG_FILE_PATH), mock_logger_debug.call_args[0]) mock_logger_debug.reset_mock() @patch('patroni.psycopg.connect', psycopg_connect) def test_get_cursor(self): for role in self.TEST_ROLES: self.assertIsNone(get_cursor({}, get_cluster_initialized_without_leader(), None, {}, role=role)) self.assertIsNotNone(get_cursor({}, get_cluster_initialized_with_leader(), None, {}, role=role)) # MockCursor returns pg_is_in_recovery as false self.assertIsNone(get_cursor({}, get_cluster_initialized_with_leader(), None, {}, role='replica')) self.assertIsNotNone(get_cursor({}, get_cluster_initialized_with_leader(), None, {'dbname': 'foo'}, role='any')) # Mutually exclusive options with self.assertRaises(PatroniCtlException) as e: get_cursor({}, get_cluster_initialized_with_leader(), None, {'dbname': 'foo'}, member_name='other', role='replica') self.assertEqual(str(e.exception), '--role and --member are mutually exclusive options') # Invalid member provided self.assertIsNone(get_cursor({}, get_cluster_initialized_with_leader(), None, {'dbname': 'foo'}, member_name='invalid')) # Valid member provided self.assertIsNotNone(get_cursor({}, get_cluster_initialized_with_leader(), None, {'dbname': 'foo'}, member_name='other')) def test_parse_dcs(self): assert parse_dcs(None) is None assert parse_dcs('localhost') == {'etcd': {'host': 'localhost:2379'}} assert parse_dcs('') == {'etcd': {'host': 'localhost:2379'}} assert parse_dcs('localhost:8500') == {'consul': {'host': 'localhost:8500'}} assert parse_dcs('zookeeper://localhost') == {'zookeeper': {'hosts': ['localhost:2181']}} assert parse_dcs('exhibitor://dummy') == {'exhibitor': {'hosts': ['dummy'], 'port': 8181}} assert 
parse_dcs('consul://localhost') == {'consul': {'host': 'localhost:8500'}} assert parse_dcs('etcd3://random.com:2399') == {'etcd3': {'host': 'random.com:2399'}} self.assertRaises(PatroniCtlException, parse_dcs, 'invalid://test') def test_output_members(self): scheduled_at = datetime.now(tzutc) + timedelta(seconds=600) cluster = get_cluster_initialized_with_leader(Failover(1, 'foo', 'bar', scheduled_at)) del cluster.members[1].data['conn_url'] for fmt in ('pretty', 'json', 'yaml', 'topology'): self.assertIsNone(output_members({}, cluster, name='abc', fmt=fmt)) with patch('click.echo') as mock_echo: self.assertIsNone(output_members({}, cluster, name='abc', fmt='tsv')) self.assertEqual(mock_echo.call_args[0][0], 'abc\tother\t\tReplica\trunning\t\tunknown') @patch('patroni.ctl.get_dcs') @patch.object(PoolManager, 'request', Mock(return_value=MockResponse())) def test_switchover(self, mock_get_dcs): mock_get_dcs.return_value = self.e mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader mock_get_dcs.return_value.set_failover_value = Mock() # Confirm result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0'], input='leader\nother\n\ny') self.assertEqual(result.exit_code, 0) # Abort result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0'], input='leader\nother\n\nN') self.assertEqual(result.exit_code, 1) # Without a candidate with --force option result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0', '--force']) self.assertEqual(result.exit_code, 0) # Scheduled (confirm) result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0'], input='leader\nother\n2300-01-01T12:23:00\ny') self.assertEqual(result.exit_code, 0) # Scheduled (abort) result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0', '--scheduled', '2015-01-01T12:00:00+01:00'], input='leader\nother\n\nN') self.assertEqual(result.exit_code, 1) # Scheduled with --force option result = self.runner.invoke(ctl, ['switchover', 
'dummy', '--group', '0', '--force', '--scheduled', '2015-01-01T12:00:00+01:00']) self.assertEqual(result.exit_code, 0) # Scheduled in pause mode with patch('patroni.config.GlobalConfig.is_paused', PropertyMock(return_value=True)): result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0', '--force', '--scheduled', '2015-01-01T12:00:00']) self.assertEqual(result.exit_code, 1) self.assertIn("Can't schedule switchover in the paused state", result.output) # Target and source are equal result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0'], input='leader\nleader\n\ny') self.assertEqual(result.exit_code, 1) self.assertIn("Candidate ['other']", result.output) self.assertIn('Member leader is already the leader of cluster dummy', result.output) # Candidate is not a member of the cluster result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0'], input='leader\nReality\n\ny') self.assertEqual(result.exit_code, 1) self.assertIn('Member Reality does not exist in cluster dummy or is tagged as nofailover', result.output) # Invalid timestamp result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0', '--force', '--scheduled', 'invalid']) self.assertEqual(result.exit_code, 1) self.assertIn('Unable to parse scheduled timestamp', result.output) # Invalid timestamp result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0', '--force', '--scheduled', '2115-02-30T12:00:00+01:00']) self.assertEqual(result.exit_code, 1) self.assertIn('Unable to parse scheduled timestamp', result.output) # Specifying wrong leader result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0'], input='dummy') self.assertEqual(result.exit_code, 1) self.assertIn('Member dummy is not the leader of cluster dummy', result.output) # Errors while sending Patroni REST API request with patch.object(PoolManager, 'request', Mock(side_effect=Exception)): result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0'], 
input='leader\nother\n2300-01-01T12:23:00\ny') self.assertIn('falling back to DCS', result.output) with patch.object(PoolManager, 'request') as mock_api_request: mock_api_request.return_value.status = 500 result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0'], input='leader\nother\n\ny') self.assertIn('Switchover failed', result.output) mock_api_request.return_value.status = 501 mock_api_request.return_value.data = b'Server does not support this operation' result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0'], input='leader\nother\n\ny') self.assertIn('Switchover failed', result.output) # No members available mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_only_leader result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0'], input='leader\nother\n\ny') self.assertEqual(result.exit_code, 1) self.assertIn('No candidates found to switchover to', result.output) # No leader available mock_get_dcs.return_value.get_cluster = get_cluster_initialized_without_leader result = self.runner.invoke(ctl, ['switchover', 'dummy', '--group', '0'], input='leader\nother\n\ny') self.assertEqual(result.exit_code, 1) self.assertIn('This cluster has no leader', result.output) # Citus cluster, no group number specified result = self.runner.invoke(ctl, ['switchover', 'dummy', '--force'], input='\n') self.assertEqual(result.exit_code, 1) self.assertIn('For Citus clusters the --group must me specified', result.output) @patch('patroni.ctl.get_dcs') @patch.object(PoolManager, 'request', Mock(return_value=MockResponse())) @patch('patroni.ctl.request_patroni', Mock(return_value=MockResponse())) def test_failover(self, mock_get_dcs): mock_get_dcs.return_value.set_failover_value = Mock() # No candidate specified mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader result = self.runner.invoke(ctl, ['failover', 'dummy'], input='0\n') self.assertIn('Failover could be performed only to a specific candidate', 
result.output) # Candidate is the same as the leader result = self.runner.invoke(ctl, ['failover', 'dummy', '--group', '0'], input='leader\n') self.assertIn("Candidate ['other']", result.output) self.assertIn('Member leader is already the leader of cluster dummy', result.output) # Temp test to check a fallback to switchover if leader is specified with patch('patroni.ctl._do_failover_or_switchover') as failover_func_mock: result = self.runner.invoke(ctl, ['failover', '--leader', 'leader', 'dummy'], input='0\n') self.assertIn('Supplying a leader name using this command is deprecated', result.output) failover_func_mock.assert_called_once_with( DEFAULT_CONFIG, 'switchover', 'dummy', None, 'leader', None, False) cluster = get_cluster_initialized_with_leader(sync=('leader', 'other')) cluster.members.append(Member(0, 'async', 28, {'api_url': 'http://127.0.0.1:8012/patroni'})) cluster.config.data['synchronous_mode'] = True mock_get_dcs.return_value.get_cluster = Mock(return_value=cluster) # Failover to an async member in sync mode (confirm) result = self.runner.invoke(ctl, ['failover', 'dummy', '--group', '0', '--candidate', 'async'], input='y\ny') self.assertIn('Are you sure you want to failover to the asynchronous node async', result.output) self.assertEqual(result.exit_code, 0) # Failover to an async member in sync mode (abort) result = self.runner.invoke(ctl, ['failover', 'dummy', '--group', '0', '--candidate', 'async'], input='N') self.assertEqual(result.exit_code, 1) self.assertIn('Aborting failover', result.output) @patch('patroni.dcs.dcs_modules', Mock(return_value=['patroni.dcs.dummy', 'patroni.dcs.etcd'])) def test_get_dcs(self): self.assertRaises(PatroniCtlException, get_dcs, {'dummy': {}}, 'dummy', 0) @patch('patroni.psycopg.connect', psycopg_connect) @patch('patroni.ctl.query_member', Mock(return_value=([['mock column']], None))) @patch('patroni.ctl.get_dcs') @patch.object(etcd.Client, 'read', etcd_read) def test_query(self, mock_get_dcs): 
mock_get_dcs.return_value = self.e # Mutually exclusive for role in self.TEST_ROLES: result = self.runner.invoke(ctl, ['query', 'alpha', '--member', 'abc', '--role', role]) assert result.exit_code == 1 with self.runner.isolated_filesystem(): with open('dummy', 'w') as dummy_file: dummy_file.write('SELECT 1') # Mutually exclusive result = self.runner.invoke(ctl, ['query', 'alpha', '--file', 'dummy', '--command', 'dummy']) assert result.exit_code == 1 result = self.runner.invoke(ctl, ['query', 'alpha', '--member', 'abc', '--file', 'dummy']) assert result.exit_code == 0 os.remove('dummy') result = self.runner.invoke(ctl, ['query', 'alpha', '--command', 'SELECT 1']) assert 'mock column' in result.output # --command or --file is mandatory result = self.runner.invoke(ctl, ['query', 'alpha']) assert result.exit_code == 1 result = self.runner.invoke(ctl, ['query', 'alpha', '--command', 'SELECT 1', '--username', 'root', '--password', '--dbname', 'postgres'], input='ab\nab') assert 'mock column' in result.output def test_query_member(self): with patch('patroni.ctl.get_cursor', Mock(return_value=MockConnect().cursor())): for role in self.TEST_ROLES: rows = query_member({}, None, None, None, None, role, 'SELECT pg_catalog.pg_is_in_recovery()', {}) self.assertTrue('False' in str(rows)) with patch.object(MockCursor, 'execute', Mock(side_effect=OperationalError('bla'))): rows = query_member({}, None, None, None, None, 'replica', 'SELECT pg_catalog.pg_is_in_recovery()', {}) with patch('patroni.ctl.get_cursor', Mock(return_value=None)): # No role nor member given -- generic message rows = query_member({}, None, None, None, None, None, 'SELECT pg_catalog.pg_is_in_recovery()', {}) self.assertTrue('No connection is available' in str(rows)) # Member given -- message pointing to member rows = query_member({}, None, None, None, 'foo', None, 'SELECT pg_catalog.pg_is_in_recovery()', {}) self.assertTrue('No connection to member foo' in str(rows)) # Role given -- message pointing to role 
rows = query_member({}, None, None, None, None, 'replica', 'SELECT pg_catalog.pg_is_in_recovery()', {}) self.assertTrue('No connection to role replica' in str(rows)) with patch('patroni.ctl.get_cursor', Mock(side_effect=OperationalError('bla'))): rows = query_member({}, None, None, None, None, 'replica', 'SELECT pg_catalog.pg_is_in_recovery()', {}) @patch('patroni.ctl.get_dcs') def test_dsn(self, mock_get_dcs): mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader result = self.runner.invoke(ctl, ['dsn', 'alpha']) assert 'host=127.0.0.1 port=5435' in result.output # Mutually exclusive options for role in self.TEST_ROLES: result = self.runner.invoke(ctl, ['dsn', 'alpha', '--role', role, '--member', 'dummy']) assert result.exit_code == 1 # Non-existing member result = self.runner.invoke(ctl, ['dsn', 'alpha', '--member', 'dummy']) assert result.exit_code == 1 @patch.object(PoolManager, 'request') @patch('patroni.ctl.get_dcs') def test_reload(self, mock_get_dcs, mock_post): mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader result = self.runner.invoke(ctl, ['reload', 'alpha'], input='y') assert 'Failed: reload for member' in result.output mock_post.return_value.status = 200 result = self.runner.invoke(ctl, ['reload', 'alpha'], input='y') assert 'No changes to apply on member' in result.output mock_post.return_value.status = 202 result = self.runner.invoke(ctl, ['reload', 'alpha'], input='y') assert 'Reload request received for member' in result.output @patch.object(PoolManager, 'request') @patch('patroni.ctl.get_dcs') def test_restart_reinit(self, mock_get_dcs, mock_post): mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader mock_post.return_value.status = 503 result = self.runner.invoke(ctl, ['restart', 'alpha'], input='now\ny\n') assert 'Failed: restart for' in result.output assert result.exit_code == 0 result = self.runner.invoke(ctl, ['reinit', 'alpha'], input='y') assert result.exit_code == 1 # 
successful reinit result = self.runner.invoke(ctl, ['reinit', 'alpha', 'other'], input='y\ny') assert result.exit_code == 0 # Aborted restart result = self.runner.invoke(ctl, ['restart', 'alpha'], input='now\nN') assert result.exit_code == 1 result = self.runner.invoke(ctl, ['restart', 'alpha', '--pending', '--force']) assert result.exit_code == 0 # Aborted scheduled restart result = self.runner.invoke(ctl, ['restart', 'alpha', '--scheduled', '2019-10-01T14:30'], input='N') assert result.exit_code == 1 # Not a member result = self.runner.invoke(ctl, ['restart', 'alpha', 'dummy', '--any'], input='now\ny') assert result.exit_code == 1 # Wrong pg version result = self.runner.invoke(ctl, ['restart', 'alpha', '--any', '--pg-version', '9.1'], input='now\ny') assert 'Error: Invalid PostgreSQL version format' in result.output assert result.exit_code == 1 result = self.runner.invoke(ctl, ['restart', 'alpha', '--pending', '--force', '--timeout', '10min']) assert result.exit_code == 0 # normal restart, the schedule is actually parsed, but not validated in patronictl result = self.runner.invoke(ctl, ['restart', 'alpha', 'other', '--force', '--scheduled', '2300-10-01T14:30']) assert 'Failed: flush scheduled restart' in result.output with patch('patroni.config.GlobalConfig.is_paused', PropertyMock(return_value=True)): result = self.runner.invoke(ctl, ['restart', 'alpha', 'other', '--force', '--scheduled', '2300-10-01T14:30']) assert result.exit_code == 1 # force restart with restart already present result = self.runner.invoke(ctl, ['restart', 'alpha', 'other', '--force', '--scheduled', '2300-10-01T14:30']) assert result.exit_code == 0 ctl_args = ['restart', 'alpha', '--pg-version', '99.0', '--scheduled', '2300-10-01T14:30'] # normal restart, the schedule is actually parsed, but not validated in patronictl mock_post.return_value.status = 200 result = self.runner.invoke(ctl, ctl_args, input='y') assert result.exit_code == 0 # get restart with the non-200 return code # normal 
restart, the schedule is actually parsed, but not validated in patronictl mock_post.return_value.status = 204 result = self.runner.invoke(ctl, ctl_args, input='y') assert result.exit_code == 0 # get restart with the non-200 return code # normal restart, the schedule is actually parsed, but not validated in patronictl mock_post.return_value.status = 202 result = self.runner.invoke(ctl, ctl_args, input='y') assert 'Success: restart scheduled' in result.output assert result.exit_code == 0 # get restart with the non-200 return code # normal restart, the schedule is actually parsed, but not validated in patronictl mock_post.return_value.status = 409 result = self.runner.invoke(ctl, ctl_args, input='y') assert 'Failed: another restart is already' in result.output assert result.exit_code == 0 @patch('patroni.ctl.get_dcs') def test_remove(self, mock_get_dcs): mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader result = self.runner.invoke(ctl, ['remove', 'dummy'], input='\n') assert 'For Citus clusters the --group must me specified' in result.output result = self.runner.invoke(ctl, ['-k', 'remove', 'alpha', '--group', '0'], input='alpha\nstandby') assert 'Please confirm' in result.output assert 'You are about to remove all' in result.output # Not typing an exact confirmation assert result.exit_code == 1 # leader specified does not match leader of cluster result = self.runner.invoke(ctl, ['remove', 'alpha', '--group', '0'], input='alpha\nYes I am aware\nstandby') assert result.exit_code == 1 # cluster specified on cmdline does not match verification prompt result = self.runner.invoke(ctl, ['remove', 'alpha', '--group', '0'], input='beta\nleader') assert result.exit_code == 1 result = self.runner.invoke(ctl, ['remove', 'alpha', '--group', '0'], input='alpha\nYes I am aware\nleader') assert result.exit_code == 0 def test_ctl(self): self.runner.invoke(ctl, ['list']) result = self.runner.invoke(ctl, ['--help']) assert 'Usage:' in result.output def 
test_get_any_member(self): for role in self.TEST_ROLES: self.assertIsNone(get_any_member({}, get_cluster_initialized_without_leader(), None, role=role)) m = get_any_member({}, get_cluster_initialized_with_leader(), None, role=role) self.assertEqual(m.name, 'leader') def test_get_all_members(self): for role in self.TEST_ROLES: self.assertEqual(list(get_all_members({}, get_cluster_initialized_without_leader(), None, role=role)), []) r = list(get_all_members({}, get_cluster_initialized_with_leader(), None, role=role)) self.assertEqual(len(r), 1) self.assertEqual(r[0].name, 'leader') r = list(get_all_members({}, get_cluster_initialized_with_leader(), None, role='replica')) self.assertEqual(len(r), 1) self.assertEqual(r[0].name, 'other') self.assertEqual(len(list(get_all_members({}, get_cluster_initialized_without_leader(), None, role='replica'))), 2) @patch('patroni.ctl.get_dcs') def test_members(self, mock_get_dcs): mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader result = self.runner.invoke(ctl, ['list']) assert '127.0.0.1' in result.output assert result.exit_code == 0 assert 'Citus cluster: alpha -' in result.output result = self.runner.invoke(ctl, ['list', '--group', '0']) assert 'Citus cluster: alpha (group: 0, 12345678901) -' in result.output with patch('patroni.ctl.load_config', Mock(return_value={'scope': 'alpha'})): result = self.runner.invoke(ctl, ['list']) assert 'Cluster: alpha (12345678901) -' in result.output with patch('patroni.ctl.load_config', Mock(return_value={})): self.runner.invoke(ctl, ['list']) @patch('patroni.ctl.get_dcs') def test_list_extended(self, mock_get_dcs): mock_get_dcs.return_value = self.e cluster = get_cluster_initialized_with_leader(sync=('leader', 'other')) mock_get_dcs.return_value.get_cluster = Mock(return_value=cluster) result = self.runner.invoke(ctl, ['list', 'dummy', '--extended', '--timestamp']) assert '2100' in result.output assert 'Scheduled restart' in result.output @patch('patroni.ctl.get_dcs') 
def test_topology(self, mock_get_dcs): mock_get_dcs.return_value = self.e cluster = get_cluster_initialized_with_leader() cascade_member = Member(0, 'cascade', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5437/postgres', 'api_url': 'http://127.0.0.1:8012/patroni', 'state': 'running', 'tags': {'replicatefrom': 'other'}, }) cascade_member_wrong_tags = Member(0, 'wrong_cascade', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5438/postgres', 'api_url': 'http://127.0.0.1:8013/patroni', 'state': 'running', 'tags': {'replicatefrom': 'nonexistinghost'}, }) cluster.members.append(cascade_member) cluster.members.append(cascade_member_wrong_tags) mock_get_dcs.return_value.get_cluster = Mock(return_value=cluster) result = self.runner.invoke(ctl, ['topology', 'dummy']) assert '+\n| 0 | leader | 127.0.0.1:5435 | Leader |' in result.output assert '|\n| 0 | + other | 127.0.0.1:5436 | Replica |' in result.output assert '|\n| 0 | + cascade | 127.0.0.1:5437 | Replica |' in result.output assert '|\n| 0 | + wrong_cascade | 127.0.0.1:5438 | Replica |' in result.output cluster = get_cluster_initialized_without_leader() mock_get_dcs.return_value.get_cluster = Mock(return_value=cluster) result = self.runner.invoke(ctl, ['topology', 'dummy']) assert '+\n| 0 | + leader | 127.0.0.1:5435 | Replica |' in result.output assert '|\n| 0 | + other | 127.0.0.1:5436 | Replica |' in result.output @patch('patroni.ctl.get_dcs') @patch.object(PoolManager, 'request', Mock(return_value=MockResponse())) def test_flush_restart(self, mock_get_dcs): mock_get_dcs.return_value = self.e mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader for role in self.TEST_ROLES: result = self.runner.invoke(ctl, ['-k', 'flush', 'dummy', 'restart', '-r', role], input='y') assert 'No scheduled restart' in result.output result = self.runner.invoke(ctl, ['flush', 'dummy', 'restart', '--force']) assert 'Success: flush scheduled restart' in result.output with patch.object(PoolManager, 
'request', return_value=MockResponse(404)): result = self.runner.invoke(ctl, ['flush', 'dummy', 'restart', '--force']) assert 'Failed: flush scheduled restart' in result.output @patch('patroni.ctl.get_dcs') @patch.object(PoolManager, 'request', Mock(return_value=MockResponse())) def test_flush_switchover(self, mock_get_dcs): mock_get_dcs.return_value = self.e mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader result = self.runner.invoke(ctl, ['flush', 'dummy', 'switchover']) assert 'No pending scheduled switchover' in result.output scheduled_at = datetime.now(tzutc) + timedelta(seconds=600) mock_get_dcs.return_value.get_cluster = Mock( return_value=get_cluster_initialized_with_leader(Failover(1, 'a', 'b', scheduled_at))) result = self.runner.invoke(ctl, ['flush', 'dummy', 'switchover']) assert result.output.startswith('Success: ') mock_get_dcs.return_value.manual_failover = Mock() with patch.object(PoolManager, 'request', side_effect=[MockResponse(409), Exception]): result = self.runner.invoke(ctl, ['flush', 'dummy', 'switchover']) assert 'Could not find any accessible member of cluster' in result.output @patch.object(PoolManager, 'request') @patch('patroni.ctl.get_dcs') @patch('patroni.ctl.polling_loop', Mock(return_value=[1])) def test_pause_cluster(self, mock_get_dcs, mock_post): mock_get_dcs.return_value = self.e mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader mock_post.return_value.status = 500 result = self.runner.invoke(ctl, ['pause', 'dummy']) assert 'Failed' in result.output mock_post.return_value.status = 200 with patch('patroni.config.GlobalConfig.is_paused', PropertyMock(return_value=True)): result = self.runner.invoke(ctl, ['pause', 'dummy']) assert 'Cluster is already paused' in result.output result = self.runner.invoke(ctl, ['pause', 'dummy', '--wait']) assert "'pause' request sent" in result.output mock_get_dcs.return_value.get_cluster = Mock(side_effect=[get_cluster_initialized_with_leader(), 
get_cluster(None, None, [], None, None)]) self.runner.invoke(ctl, ['pause', 'dummy', '--wait']) member = Member(1, 'other', 28, {}) mock_get_dcs.return_value.get_cluster = Mock(side_effect=[get_cluster_initialized_with_leader(), get_cluster(None, None, [member], None, None)]) self.runner.invoke(ctl, ['pause', 'dummy', '--wait']) @patch.object(PoolManager, 'request') @patch('patroni.ctl.get_dcs') def test_resume_cluster(self, mock_get_dcs, mock_post): mock_get_dcs.return_value = self.e mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader mock_post.return_value.status = 200 with patch('patroni.config.GlobalConfig.is_paused', PropertyMock(return_value=False)): result = self.runner.invoke(ctl, ['resume', 'dummy']) assert 'Cluster is not paused' in result.output with patch('patroni.config.GlobalConfig.is_paused', PropertyMock(return_value=True)): result = self.runner.invoke(ctl, ['resume', 'dummy']) assert 'Success' in result.output mock_post.return_value.status = 500 result = self.runner.invoke(ctl, ['resume', 'dummy']) assert 'Failed' in result.output mock_post.side_effect = Exception result = self.runner.invoke(ctl, ['resume', 'dummy']) assert 'Can not find accessible cluster member' in result.output def test_apply_config_changes(self): config = {"postgresql": {"parameters": {"work_mem": "4MB"}, "use_pg_rewind": True}, "ttl": 30} before_editing = format_config_for_editing(config) # Spaces are allowed and stripped, numbers and booleans are interpreted after_editing, changed_config = apply_config_changes(before_editing, config, ["postgresql.parameters.work_mem = 5MB", "ttl=15", "postgresql.use_pg_rewind=off", 'a.b=c']) self.assertEqual(changed_config, {"a": {"b": "c"}, "postgresql": {"parameters": {"work_mem": "5MB"}, "use_pg_rewind": False}, "ttl": 15}) # postgresql.parameters namespace is flattened after_editing, changed_config = apply_config_changes(before_editing, config, ["postgresql.parameters.work_mem.sub = x"]) 
self.assertEqual(changed_config, {"postgresql": {"parameters": {"work_mem": "4MB", "work_mem.sub": "x"}, "use_pg_rewind": True}, "ttl": 30}) # Setting to null deletes after_editing, changed_config = apply_config_changes(before_editing, config, ["postgresql.parameters.work_mem=null"]) self.assertEqual(changed_config, {"postgresql": {"use_pg_rewind": True}, "ttl": 30}) after_editing, changed_config = apply_config_changes(before_editing, config, ["postgresql.use_pg_rewind=null", "postgresql.parameters.work_mem=null"]) self.assertEqual(changed_config, {"ttl": 30}) self.assertRaises(PatroniCtlException, apply_config_changes, before_editing, config, ['a']) @patch('sys.stdout.isatty', return_value=False) @patch('patroni.ctl.markup_to_pager') @patch('os.environ.get', return_value=None) @patch('shutil.which', return_value=None) def test_show_diff(self, mock_which, mock_env_get, mock_markup_to_pager, mock_isatty): # no TTY show_diff("foo:\n bar: 1\n", "foo:\n bar: 2\n") mock_markup_to_pager.assert_not_called() # TTY but no PAGER nor executable mock_isatty.return_value = True with self.assertRaises(PatroniCtlException) as e: show_diff("foo:\n bar: 1\n", "foo:\n bar: 2\n") self.assertEqual( str(e.exception), 'No pager could be found. Either set PAGER environment variable with ' 'your pager or install either "less" or "more" in the host.' ) mock_env_get.assert_called_once_with('PAGER') mock_which.assert_has_calls([ mock.call('less'), mock.call('more'), ]) mock_markup_to_pager.assert_not_called() # TTY with PAGER set but invalid mock_env_get.reset_mock() mock_env_get.return_value = 'random' mock_which.reset_mock() with self.assertRaises(PatroniCtlException) as e: show_diff("foo:\n bar: 1\n", "foo:\n bar: 2\n") self.assertEqual( str(e.exception), 'No pager could be found. Either set PAGER environment variable with ' 'your pager or install either "less" or "more" in the host.' 
) mock_env_get.assert_called_once_with('PAGER') mock_which.assert_has_calls([ mock.call('random'), mock.call('less'), mock.call('more'), ]) mock_markup_to_pager.assert_not_called() # TTY with valid executable mock_which.side_effect = [None, '/usr/bin/less', None] show_diff("foo:\n bar: 1\n", "foo:\n bar: 2\n") mock_markup_to_pager.assert_called_once() # Test that unicode handling doesn't fail with an exception mock_which.side_effect = [None, '/usr/bin/less', None] show_diff(b"foo:\n bar: \xc3\xb6\xc3\xb6\n".decode('utf-8'), b"foo:\n bar: \xc3\xbc\xc3\xbc\n".decode('utf-8')) @patch('subprocess.call', return_value=1) def test_invoke_editor(self, mock_subprocess_call): os.environ.pop('EDITOR', None) for e in ('', '/bin/vi'): with patch('shutil.which', Mock(return_value=e)): self.assertRaises(PatroniCtlException, invoke_editor, 'foo: bar\n', 'test') @patch('patroni.ctl.get_dcs') def test_show_config(self, mock_get_dcs): mock_get_dcs.return_value = self.e mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader self.runner.invoke(ctl, ['show-config', 'dummy']) @patch('patroni.ctl.get_dcs') @patch('subprocess.call', Mock(return_value=0)) def test_edit_config(self, mock_get_dcs): mock_get_dcs.return_value = self.e mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader mock_get_dcs.return_value.set_config_value = Mock(return_value=False) os.environ['EDITOR'] = 'true' self.runner.invoke(ctl, ['edit-config', 'dummy']) self.runner.invoke(ctl, ['edit-config', 'dummy', '-s', 'foo=bar']) self.runner.invoke(ctl, ['edit-config', 'dummy', '--replace', 'postgres0.yml']) self.runner.invoke(ctl, ['edit-config', 'dummy', '--apply', '-'], input='foo: bar') self.runner.invoke(ctl, ['edit-config', 'dummy', '--force', '--apply', '-'], input='foo: bar') mock_get_dcs.return_value.set_config_value.return_value = True self.runner.invoke(ctl, ['edit-config', 'dummy', '--force', '--apply', '-'], input='foo: bar') mock_get_dcs.return_value.get_cluster = 
Mock(return_value=Cluster.empty()) result = self.runner.invoke(ctl, ['edit-config', 'dummy']) assert result.exit_code == 1 assert 'The config key does not exist in the cluster dummy' in result.output @patch('patroni.ctl.get_dcs') def test_version(self, mock_get_dcs): mock_get_dcs.return_value = self.e mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader with patch.object(PoolManager, 'request') as mocked: result = self.runner.invoke(ctl, ['version']) assert 'patronictl version' in result.output mocked.return_value.data = b'{"patroni":{"version":"1.2.3"},"server_version": 100001}' result = self.runner.invoke(ctl, ['version', 'dummy']) assert '1.2.3' in result.output with patch.object(PoolManager, 'request', Mock(side_effect=Exception)): result = self.runner.invoke(ctl, ['version', 'dummy']) assert 'failed to get version' in result.output @patch('patroni.ctl.get_dcs') def test_history(self, mock_get_dcs): mock_get_dcs.return_value.get_cluster = Mock() mock_get_dcs.return_value.get_cluster.return_value.history.lines = [[1, 67176, 'no recovery target specified']] result = self.runner.invoke(ctl, ['history']) assert 'Reason' in result.output def test_format_pg_version(self): self.assertEqual(format_pg_version(100001), '10.1') self.assertEqual(format_pg_version(90605), '9.6.5') @patch('patroni.ctl.get_dcs') def test_get_members(self, mock_get_dcs): mock_get_dcs.return_value = self.e mock_get_dcs.return_value.get_cluster = get_cluster_not_initialized_without_leader result = self.runner.invoke(ctl, ['reinit', 'dummy']) assert "cluster doesn\'t have any members" in result.output @patch('time.sleep', Mock()) @patch('patroni.ctl.get_dcs') def test_reinit_wait(self, mock_get_dcs): mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader with patch.object(PoolManager, 'request') as mocked: mocked.side_effect = [Mock(data=s, status=200) for s in [b"reinitialize", b'{"state":"creating replica"}', b'{"state":"running"}']] result = 
self.runner.invoke(ctl, ['reinit', 'alpha', 'other', '--wait'], input='y\ny') self.assertIn("Waiting for reinitialize to complete on: other", result.output) self.assertIn("Reinitialize is completed on: other", result.output) class TestPatronictlPrettyTable(unittest.TestCase): def setUp(self): self.pt = PatronictlPrettyTable(' header', ['foo', 'bar'], hrules=ALL) def test__get_hline(self): expected = '+-----+-----+' self.pt._hrule = expected self.assertEqual(self.pt._hrule, '+ header----+') self.assertFalse(self.pt._is_first_hline()) self.assertEqual(self.pt._hrule, expected) @patch.object(PrettyTable, '_stringify_hrule', Mock(return_value='+-----+-----+')) def test__stringify_hrule(self): self.assertEqual(self.pt._stringify_hrule((), 'top_'), '+ header----+') self.assertFalse(self.pt._is_first_hline()) def test_output(self): self.assertEqual(str(self.pt), '+ header----+\n| foo | bar |\n+-----+-----+') patroni-3.2.2/tests/test_etcd.py000066400000000000000000000407361455170150700167030ustar00rootroot00000000000000import etcd import urllib3.util.connection import socket import unittest from dns.exception import DNSException from mock import Mock, PropertyMock, patch from patroni.dcs.etcd import AbstractDCS, EtcdClient, Cluster, Etcd, EtcdError, DnsCachingResolver from patroni.exceptions import DCSError from patroni.utils import Retry from urllib3.exceptions import ReadTimeoutError from . 
import SleepException, MockResponse, requests_get def etcd_watch(self, key, index=None, timeout=None, recursive=None): if timeout == 2.0: raise etcd.EtcdWatchTimedOut elif timeout == 5.0: return etcd.EtcdResult('compareAndSwap', {}) elif 5 < timeout <= 10.0: raise etcd.EtcdException elif timeout == 20.0: raise etcd.EtcdEventIndexCleared def etcd_write(self, key, value, **kwargs): if key == '/service/exists/leader': raise etcd.EtcdAlreadyExist if key in ['/service/test/leader', '/patroni/test/leader'] and \ (kwargs.get('prevValue') == 'foo' or not kwargs.get('prevExist', True)): return True raise etcd.EtcdException def etcd_read(self, key, **kwargs): if key == '/service/noleader/': raise DCSError('noleader') elif key == '/service/nocluster/': raise etcd.EtcdKeyNotFound response = {"action": "get", "node": {"key": "/service/batman5", "dir": True, "nodes": [ {"key": "/service/batman5/1", "dir": True, "nodes": [ {"key": "/service/batman5/1/initialize", "value": "2164261704", "modifiedIndex": 20729, "createdIndex": 20729}], "modifiedIndex": 20437, "createdIndex": 20437}, {"key": "/service/batman5/config", "value": '{"synchronous_mode": 0, "failsafe_mode": true}', "modifiedIndex": 1582, "createdIndex": 1582}, {"key": "/service/batman5/failover", "value": "", "modifiedIndex": 1582, "createdIndex": 1582}, {"key": "/service/batman5/initialize", "value": "postgresql0", "modifiedIndex": 1582, "createdIndex": 1582}, {"key": "/service/batman5/leader", "value": "postgresql1", "expiration": "2015-05-15T09:11:00.037397538Z", "ttl": 21, "modifiedIndex": 20728, "createdIndex": 20434}, {"key": "/service/batman5/optime", "dir": True, "nodes": [ {"key": "/service/batman5/optime/leader", "value": "2164261704", "modifiedIndex": 20729, "createdIndex": 20729}], "modifiedIndex": 20437, "createdIndex": 20437}, {"key": "/service/batman5/sync", "value": '{"leader": "leader"}', "modifiedIndex": 1582, "createdIndex": 1582}, {"key": "/service/batman5/members", "dir": True, "nodes": [ {"key": 
"/service/batman5/members/postgresql1", "value": "postgres://replicator:rep-pass@127.0.0.1:5434/postgres" + "?application_name=http://127.0.0.1:8009/patroni", "expiration": "2015-05-15T09:10:59.949384522Z", "ttl": 21, "modifiedIndex": 20727, "createdIndex": 20727}, {"key": "/service/batman5/members/postgresql0", "value": "postgres://replicator:rep-pass@127.0.0.1:5433/postgres" + "?application_name=http://127.0.0.1:8008/patroni", "expiration": "2015-05-15T09:11:09.611860899Z", "ttl": 30, "modifiedIndex": 20730, "createdIndex": 20730}], "modifiedIndex": 1581, "createdIndex": 1581}, {"key": "/service/batman5/failsafe", "value": '{', "modifiedIndex": 1582, "createdIndex": 1582}, {"key": "/service/batman5/status", "value": '{"optime":2164261704,"slots":{"ls":12345}}', "modifiedIndex": 1582, "createdIndex": 1582}], "modifiedIndex": 1581, "createdIndex": 1581}} if key == '/service/legacy/': response['node']['nodes'].pop() if key == '/service/broken/': response['node']['nodes'][-1]['value'] = '{' result = etcd.EtcdResult(**response) result.etcd_index = 0 return result def dns_query(name, _): if '-server' not in name or '-ssl' in name: return [] if name == '_etcd-server._tcp.blabla': return [] elif name == '_etcd-server._tcp.exception': raise DNSException() srv = Mock() srv.port = 2380 srv.target.to_text.return_value = \ 'localhost' if name in ['_etcd-server._tcp.foobar', '_etcd-server-baz._tcp.foobar'] else '127.0.0.1' return [srv] def socket_getaddrinfo(*args): if args[0] in ('ok', 'localhost', '127.0.0.1'): return [(socket.AF_INET, 1, 6, '', ('127.0.0.1', 0)), (socket.AF_INET6, 1, 6, '', ('::1', 0))] raise socket.gaierror def http_request(method, url, **kwargs): if url == 'http://localhost:2379/timeout': raise ReadTimeoutError(None, None, None) ret = MockResponse() if url == 'http://localhost:2379/v2/machines': ret.content = 'http://localhost:2379,http://localhost:4001' elif url == 'http://localhost:4001/v2/machines': ret.content = '' elif url != 
'http://localhost:2379/': raise socket.error return ret class TestDnsCachingResolver(unittest.TestCase): @patch('time.sleep', Mock(side_effect=SleepException)) @patch('socket.getaddrinfo', Mock(side_effect=socket.gaierror)) def test_run(self): r = DnsCachingResolver() r._invoke_excepthook = Mock() self.assertIsNone(r.resolve_async('', 0)) r.join() @patch('dns.resolver.query', dns_query) @patch('socket.getaddrinfo', socket_getaddrinfo) @patch('patroni.dcs.etcd.requests_get', requests_get) class TestClient(unittest.TestCase): @patch('dns.resolver.query', dns_query) @patch('socket.getaddrinfo', socket_getaddrinfo) @patch('patroni.dcs.etcd.requests_get', requests_get) @patch.object(EtcdClient, '_get_machines_list', Mock(return_value=['http://localhost:2379', 'http://localhost:4001'])) def setUp(self): self.etcd = Etcd({'namespace': '/patroni/', 'ttl': 30, 'retry_timeout': 3, 'srv': 'test', 'scope': 'test', 'name': 'foo'}) self.client = self.etcd._client self.client.http.request = http_request self.client.http.request_encode_body = http_request def test_machines(self): self.client._base_uri = 'http://localhost:4002' self.client._machines_cache = ['http://localhost:4002', 'http://localhost:2379'] self.assertIsNotNone(self.client.machines) self.client._base_uri = 'http://localhost:4001' self.client._machines_cache = ['http://localhost:4001'] self.client._update_machines_cache = True machines = None try: machines = self.client.machines self.assertFail() except Exception: self.assertIsNone(machines) @patch.object(EtcdClient, '_get_machines_list', Mock(return_value=['http://localhost:4001', 'http://localhost:2379'])) def test_api_execute(self): self.client._base_uri = 'http://localhost:4001' self.assertRaises(etcd.EtcdException, self.client.api_execute, '/', 'POST', timeout=0) self.client._base_uri = 'http://localhost:4001' rtry = Retry(deadline=10, max_delay=1, max_tries=-1, retry_exceptions=(etcd.EtcdLeaderElectionInProgress,)) rtry(self.client.api_execute, '/', 'POST', 
timeout=0, params={'retry': rtry}) self.client._machines_cache_updated = 0 self.client.api_execute('/', 'POST', timeout=0) self.client._machines_cache = [self.client._base_uri] self.assertRaises(etcd.EtcdWatchTimedOut, self.client.api_execute, '/timeout', 'POST', params={'wait': 'true'}) self.assertRaises(etcd.EtcdWatchTimedOut, self.client.api_execute, '/timeout', 'POST', params={'wait': 'true'}) with patch.object(EtcdClient, '_calculate_timeouts', Mock(side_effect=[(1, 1, 0), (1, 1, 0), (0, 1, 0)])), \ patch.object(EtcdClient, '_load_machines_cache', Mock(side_effect=Exception)): self.client.http.request = Mock(side_effect=socket.error) self.assertRaises(etcd.EtcdException, rtry, self.client.api_execute, '/', 'GET', params={'retry': rtry}) with patch.object(EtcdClient, '_calculate_timeouts', Mock(side_effect=[(1, 1, 0), (1, 1, 0), (0, 1, 0)])), \ patch.object(EtcdClient, '_load_machines_cache', Mock(return_value=True)): self.assertRaises(etcd.EtcdException, rtry, self.client.api_execute, '/', 'GET', params={'retry': rtry}) with patch.object(EtcdClient, '_do_http_request', Mock(side_effect=etcd.EtcdException)): self.client._read_timeout = 0.01 self.assertRaises(etcd.EtcdException, self.client.api_execute, '/', 'GET') def test_get_srv_record(self): self.assertEqual(self.client.get_srv_record('_etcd-server._tcp.blabla'), []) self.assertEqual(self.client.get_srv_record('_etcd-server._tcp.exception'), []) def test__get_machines_cache_from_srv(self): self.client._get_machines_cache_from_srv('foobar') self.client._get_machines_cache_from_srv('foobar', 'baz') self.client.get_srv_record = Mock(return_value=[('localhost', 2380)]) self.client._get_machines_cache_from_srv('blabla') def test__get_machines_cache_from_dns(self): self.client._get_machines_cache_from_dns('error', 2379) @patch.object(EtcdClient, '_get_machines_list', Mock(side_effect=etcd.EtcdConnectionFailed)) def test__refresh_machines_cache(self): self.assertFalse(self.client._refresh_machines_cache()) 
self.assertRaises(etcd.EtcdException, self.client._refresh_machines_cache, ['http://localhost:2379']) def test__load_machines_cache(self): self.client._config = {} self.assertRaises(Exception, self.client._load_machines_cache) self.client._config = {'srv': 'blabla'} self.assertRaises(etcd.EtcdException, self.client._load_machines_cache) @patch.object(socket.socket, 'connect') def test_create_connection_patched(self, mock_connect): self.assertRaises(socket.error, urllib3.util.connection.create_connection, ('fail', 2379)) urllib3.util.connection.create_connection(('[localhost]', 2379)) mock_connect.side_effect = socket.error self.assertRaises(socket.error, urllib3.util.connection.create_connection, ('[localhost]', 2379), timeout=1, source_address=('localhost', 53333), socket_options=[(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)]) def test___del__(self): self.client.http.clear = Mock(side_effect=TypeError) del self.client @patch('patroni.dcs.etcd.requests_get', requests_get) @patch('socket.getaddrinfo', socket_getaddrinfo) @patch.object(etcd.Client, 'write', etcd_write) @patch.object(etcd.Client, 'read', etcd_read) @patch.object(etcd.Client, 'delete', Mock(side_effect=etcd.EtcdException)) class TestEtcd(unittest.TestCase): @patch('socket.getaddrinfo', socket_getaddrinfo) @patch.object(EtcdClient, '_get_machines_list', Mock(return_value=['http://localhost:2379', 'http://localhost:4001'])) def setUp(self): self.etcd = Etcd({'namespace': '/patroni/', 'ttl': 30, 'retry_timeout': 10, 'host': 'localhost:2379', 'scope': 'test', 'name': 'foo'}) def test_base_path(self): self.assertEqual(self.etcd._base_path, '/patroni/test') @patch('dns.resolver.query', dns_query) @patch('time.sleep', Mock(side_effect=SleepException)) @patch.object(EtcdClient, '_get_machines_list', Mock(side_effect=etcd.EtcdConnectionFailed)) def test_get_etcd_client(self): self.assertRaises(SleepException, self.etcd.get_etcd_client, {'discovery_srv': 'test', 'retry_timeout': 10, 'cacert': '1', 'key': '1', 
'cert': 1}, EtcdClient) self.assertRaises(SleepException, self.etcd.get_etcd_client, {'url': 'https://test:2379', 'retry_timeout': 10}, EtcdClient) self.assertRaises(SleepException, self.etcd.get_etcd_client, {'hosts': 'foo:4001,bar', 'retry_timeout': 10}, EtcdClient) with patch.object(EtcdClient, '_get_machines_list', Mock(return_value=[])): self.assertRaises(SleepException, self.etcd.get_etcd_client, {'proxy': 'https://user:password@test:2379', 'retry_timeout': 10}, EtcdClient) def test_get_cluster(self): cluster = self.etcd.get_cluster() self.assertIsInstance(cluster, Cluster) self.etcd._base_path = '/service/legacy' self.assertIsInstance(self.etcd.get_cluster(), Cluster) self.etcd._base_path = '/service/broken' self.assertIsInstance(self.etcd.get_cluster(), Cluster) self.etcd._base_path = '/service/nocluster' cluster = self.etcd.get_cluster() self.assertIsInstance(cluster, Cluster) self.assertIsNone(cluster.leader) self.etcd._base_path = '/service/noleader' self.assertRaises(EtcdError, self.etcd.get_cluster) def test__get_citus_cluster(self): self.etcd._citus_group = '0' cluster = self.etcd.get_cluster() self.assertIsInstance(cluster, Cluster) self.assertIsInstance(cluster.workers[1], Cluster) self.etcd._base_path = '/service/nocluster' self.assertTrue(self.etcd.get_cluster().is_empty()) def test_touch_member(self): self.assertFalse(self.etcd.touch_member('')) def test_take_leader(self): self.assertFalse(self.etcd.take_leader()) def test_attempt_to_acquire_leader(self): self.etcd._base_path = '/service/exists' self.assertFalse(self.etcd.attempt_to_acquire_leader()) self.etcd._base_path = '/service/failed' self.assertFalse(self.etcd.attempt_to_acquire_leader()) with patch.object(EtcdClient, 'write', Mock(side_effect=[etcd.EtcdConnectionFailed, Exception])): self.assertRaises(EtcdError, self.etcd.attempt_to_acquire_leader) self.assertRaises(EtcdError, self.etcd.attempt_to_acquire_leader) @patch.object(Cluster, 'min_version', PropertyMock(return_value=(2, 0))) def 
test_write_leader_optime(self): self.etcd.get_cluster() self.etcd.write_leader_optime('0') def test_update_leader(self): leader = self.etcd.get_cluster().leader self.assertTrue(self.etcd.update_leader(leader, None, failsafe={'foo': 'bar'})) with patch.object(etcd.Client, 'write', Mock(side_effect=[etcd.EtcdConnectionFailed, etcd.EtcdClusterIdChanged, Exception])): self.assertRaises(EtcdError, self.etcd.update_leader, leader, None) self.assertFalse(self.etcd.update_leader(leader, None)) self.assertRaises(EtcdError, self.etcd.update_leader, leader, None) with patch.object(etcd.Client, 'write', Mock(side_effect=etcd.EtcdKeyNotFound)): self.assertFalse(self.etcd.update_leader(leader, None)) def test_initialize(self): self.assertFalse(self.etcd.initialize()) def test_cancel_initializion(self): self.assertFalse(self.etcd.cancel_initialization()) def test_delete_leader(self): self.assertFalse(self.etcd.delete_leader(self.etcd.get_cluster().leader)) def test_delete_cluster(self): self.assertFalse(self.etcd.delete_cluster()) @patch('time.sleep', Mock(side_effect=SleepException)) @patch.object(etcd.Client, 'watch', etcd_watch) def test_watch(self): self.etcd.watch(None, 0) self.etcd.get_cluster() self.etcd.watch(20729, 1.5) with patch('time.sleep', Mock()): self.etcd.watch(20729, 4.5) with patch.object(AbstractDCS, 'watch', Mock()): self.assertTrue(self.etcd.watch(20729, 19.5)) self.assertRaises(SleepException, self.etcd.watch, 20729, 9.5) def test_other_exceptions(self): self.etcd.retry = Mock(side_effect=AttributeError('foo')) self.assertRaises(EtcdError, self.etcd.cancel_initialization) def test_set_ttl(self): self.etcd.set_ttl(20) self.assertTrue(self.etcd.watch(None, 1)) def test_sync_state(self): self.assertIsNone(self.etcd.write_sync_state('leader', None)) self.assertFalse(self.etcd.delete_sync_state()) def test_set_history_value(self): self.assertFalse(self.etcd.set_history_value('{}')) def test_last_seen(self): self.assertIsNotNone(self.etcd.last_seen) 
patroni-3.2.2/tests/test_etcd3.py000066400000000000000000000367751455170150700167760ustar00rootroot00000000000000import etcd import json import unittest import urllib3 from mock import Mock, PropertyMock, patch from patroni.dcs.etcd import DnsCachingResolver from patroni.dcs.etcd3 import PatroniEtcd3Client, Cluster, Etcd3, Etcd3Client, \ Etcd3Error, Etcd3ClientError, ReAuthenticateMode, RetryFailedError, InvalidAuthToken, Unavailable, \ Unknown, UnsupportedEtcdVersion, UserEmpty, AuthFailed, AuthOldRevision, base64_encode from threading import Thread from . import SleepException, MockResponse def mock_urlopen(self, method, url, **kwargs): ret = MockResponse() if method == 'GET' and url.endswith('/version'): ret.content = '{"etcdserver": "3.3.13", "etcdcluster": "3.3.0"}' elif method != 'POST': raise Exception('Unexpected request method: {0} {1} {2}'.format(method, url, kwargs)) elif url.endswith('/cluster/member/list'): ret.content = '{"members":[{"clientURLs":["http://localhost:2379", "http://localhost:4001"]}]}' elif url.endswith('/auth/authenticate'): ret.content = '{"token":"authtoken"}' elif url.endswith('/lease/grant'): ret.content = '{"ID": "123"}' elif url.endswith('/lease/keepalive'): ret.content = '{"result":{"TTL":30}}' elif url.endswith('/kv/range'): ret.content = json.dumps({ "header": {"revision": "1"}, "kvs": [ {"key": base64_encode('/patroni/test/1/initialize'), "value": base64_encode('12345'), "mod_revision": '1'}, {"key": base64_encode('/patroni/test/leader'), "value": base64_encode('foo'), "lease": "bla", "mod_revision": '1'}, {"key": base64_encode('/patroni/test/members/foo'), "value": base64_encode('{}'), "lease": "123", "mod_revision": '1'}, {"key": base64_encode('/patroni/test/members/bar'), "value": base64_encode('{"version":"1.6.5"}'), "lease": "123", "mod_revision": '1'}, {"key": base64_encode('/patroni/test/failover'), "value": base64_encode('{}'), "mod_revision": '1'}, {"key": base64_encode('/patroni/test/failsafe'), "value": 
base64_encode('{'), "mod_revision": '1'} ] }) elif url.endswith('/watch'): key = base64_encode('/patroni/test/config') ret.read_chunked = Mock(return_value=[json.dumps({ 'result': {'events': [ {'kv': {'key': key, 'value': base64_encode('bar'), 'mod_revision': '2'}}, {'kv': {'key': key, 'value': base64_encode('buzz'), 'mod_revision': '3'}}, {'type': 'DELETE', 'kv': {'key': key, 'mod_revision': '4'}}, {'kv': {'key': base64_encode('/patroni/test/optime/leader'), 'value': base64_encode('1234567'), 'mod_revision': '5'}}, ]} })[:-1].encode('utf-8'), b'}{"error":{"grpc_code":14,"message":"","http_code":503}}']) elif url.endswith('/kv/put') or url.endswith('/kv/txn'): if base64_encode('/patroni/test/sync') in kwargs['body']: ret.content = '{"header":{"revision":"1"},"succeeded":true}' else: ret.status_code = 400 ret.content = '{"code":5,"error":"etcdserver: requested lease not found"}' elif not url.endswith('/kv/deleterange'): raise Exception('Unexpected url: {0} {1} {2}'.format(method, url, kwargs)) return ret class TestEtcd3Client(unittest.TestCase): @patch.object(Thread, 'start', Mock()) @patch.object(urllib3.PoolManager, 'urlopen', mock_urlopen) def test_authenticate(self): etcd3 = Etcd3Client({'host': '127.0.0.1', 'port': 2379, 'use_proxies': True, 'retry_timeout': 10}, DnsCachingResolver()) self.assertIsNotNone(etcd3._cluster_version) class BaseTestEtcd3(unittest.TestCase): @patch.object(Thread, 'start', Mock()) @patch.object(urllib3.PoolManager, 'urlopen', mock_urlopen) def setUp(self): self.etcd3 = Etcd3({'namespace': '/patroni/', 'ttl': 30, 'retry_timeout': 10, 'host': 'localhost:2378', 'scope': 'test', 'name': 'foo', 'username': 'etcduser', 'password': 'etcdpassword'}) self.client = self.etcd3._client self.kv_cache = self.client._kv_cache class TestKVCache(BaseTestEtcd3): @patch.object(urllib3.PoolManager, 'urlopen', mock_urlopen) @patch.object(Etcd3Client, 'watchprefix', Mock(return_value=urllib3.response.HTTPResponse())) def test__build_cache(self): 
self.kv_cache._build_cache() def test__do_watch(self): self.client.watchprefix = Mock(return_value=False) self.assertRaises(AttributeError, self.kv_cache._do_watch, '1') @patch('time.sleep', Mock(side_effect=SleepException)) @patch('patroni.dcs.etcd3.KVCache._build_cache', Mock(side_effect=Exception)) def test_run(self): self.assertRaises(SleepException, self.kv_cache.run) @patch.object(urllib3.response.HTTPResponse, 'read_chunked', Mock(return_value=[b'{"error":{"grpc_code":14,"message":"","http_code":503}}'])) @patch.object(Etcd3Client, 'watchprefix', Mock(return_value=urllib3.response.HTTPResponse())) def test_kill_stream(self): self.assertRaises(Unavailable, self.kv_cache._do_watch, '1') with patch.object(urllib3.response.HTTPResponse, 'connection') as mock_conn: self.kv_cache.kill_stream() mock_conn.sock.close.side_effect = Exception self.kv_cache.kill_stream() type(mock_conn).sock = PropertyMock(side_effect=Exception) self.kv_cache.kill_stream() class TestPatroniEtcd3Client(BaseTestEtcd3): @patch('patroni.dcs.etcd3.Etcd3Client.authenticate', Mock(side_effect=AuthFailed)) def test__init__(self): self.assertRaises(SystemExit, self.setUp) @patch.object(urllib3.PoolManager, 'urlopen') def test_call_rpc(self, mock_urlopen): request = {'key': base64_encode('/patroni/test/leader')} mock_urlopen.return_value = MockResponse() mock_urlopen.return_value.content = '{"succeeded":true,"header":{"revision":"1"}}' self.client.call_rpc('/kv/put', request) self.client.call_rpc('/kv/deleterange', request) @patch.object(urllib3.PoolManager, 'urlopen') def test_txn(self, mock_urlopen): mock_urlopen.return_value = MockResponse() mock_urlopen.return_value.content = '{"header":{"revision":"1"}}' self.client.txn({'target': 'MOD', 'mod_revision': '1'}, {'request_delete_range': {'key': base64_encode('/patroni/test/leader')}}) @patch('time.time', Mock(side_effect=[1, 10.9, 100])) def test__wait_cache(self): with self.kv_cache.condition: self.assertRaises(RetryFailedError, 
self.client._wait_cache, 10) @patch.object(urllib3.PoolManager, 'urlopen') def test__restart_watcher(self, mock_urlopen): mock_urlopen.return_value = MockResponse() mock_urlopen.return_value.status_code = 400 mock_urlopen.return_value.content = '{"code":9,"error":"etcdserver: authentication is not enabled"}' self.client.authenticate() @patch.object(urllib3.PoolManager, 'urlopen') def test__handle_auth_errors(self, mock_urlopen): mock_urlopen.return_value = MockResponse() mock_urlopen.return_value.content = '{"code":3,"error":"etcdserver: user name is empty"}' mock_urlopen.return_value.status_code = 403 self.client._cluster_version = (3, 1, 5) self.assertRaises(UnsupportedEtcdVersion, self.client.deleteprefix, 'foo') self.client._cluster_version = (3, 3, 13) self.assertRaises(UserEmpty, self.client.deleteprefix, 'foo') mock_urlopen.return_value.content = '{"code":16,"error":"etcdserver: invalid auth token"}' self.assertRaises(InvalidAuthToken, self.client.deleteprefix, 'foo') with patch.object(PatroniEtcd3Client, 'authenticate', Mock(return_value=True)): retry = self.etcd3._retry.copy() with patch('time.time', Mock(side_effect=[0, 10, 20, 30, 40])): self.assertRaises(InvalidAuthToken, retry, self.client.deleteprefix, 'foo', retry=retry) self.client.username = None self.client._reauthenticate_reason = ReAuthenticateMode.NOT_REQUIRED retry = self.etcd3._retry.copy() self.assertRaises(InvalidAuthToken, retry, self.client.deleteprefix, 'foo', retry=retry) mock_urlopen.return_value.content = '{"code":3,"error":"etcdserver: revision of auth store is old"}' self.client._reauthenticate_reason = ReAuthenticateMode.NOT_REQUIRED self.assertRaises(AuthOldRevision, retry, self.client.deleteprefix, 'foo', retry=retry) def test__handle_server_response(self): response = MockResponse() response.content = '{"code":0,"error":"' self.assertRaises(etcd.EtcdException, self.client._handle_server_response, response) response.status_code = 400 self.assertRaises(Unknown, 
self.client._handle_server_response, response) response.content = '{"error":{"grpc_code":0,"message":"","http_code":400}}' try: self.client._handle_server_response(response) except Unknown as e: self.assertEqual(e.as_dict(), {'code': 2, 'codeText': 'OK', 'error': u'', 'status': 400}) @patch.object(urllib3.PoolManager, 'urlopen') def test__ensure_version_prefix(self, mock_urlopen): self.client.version_prefix = None mock_urlopen.return_value = MockResponse() mock_urlopen.return_value.content = '{"etcdserver": "3.0.3", "etcdcluster": "3.0.0"}' self.assertRaises(UnsupportedEtcdVersion, self.client._ensure_version_prefix, '') mock_urlopen.return_value.content = '{"etcdserver": "3.0.4", "etcdcluster": "3.0.0"}' self.client._ensure_version_prefix('') self.assertEqual(self.client.version_prefix, '/v3alpha') mock_urlopen.return_value.content = '{"etcdserver": "3.4.4", "etcdcluster": "3.4.0"}' self.client._ensure_version_prefix('') self.assertEqual(self.client.version_prefix, '/v3') @patch.object(urllib3.PoolManager, 'urlopen', mock_urlopen) class TestEtcd3(BaseTestEtcd3): @patch.object(Thread, 'start', Mock()) @patch.object(urllib3.PoolManager, 'urlopen', mock_urlopen) def setUp(self): super(TestEtcd3, self).setUp() # self.assertRaises(AttributeError, self.kv_cache._build_cache) self.kv_cache._build_cache() self.kv_cache._is_ready = True self.etcd3.get_cluster() def test_get_cluster(self): self.assertIsInstance(self.etcd3.get_cluster(), Cluster) self.client._kv_cache = None with patch.object(urllib3.PoolManager, 'urlopen') as mock_urlopen: mock_urlopen.return_value = MockResponse() mock_urlopen.return_value.content = json.dumps({ "header": {"revision": "1"}, "kvs": [ {"key": base64_encode('/patroni/test/status'), "value": base64_encode('{"optime":1234567,"slots":{"ls":12345}}'), "mod_revision": '1'} ] }) self.assertIsInstance(self.etcd3.get_cluster(), Cluster) mock_urlopen.return_value.content = json.dumps({ "header": {"revision": "1"}, "kvs": [ {"key": 
base64_encode('/patroni/test/status'), "value": base64_encode('{'), "mod_revision": '1'} ] }) self.assertIsInstance(self.etcd3.get_cluster(), Cluster) mock_urlopen.side_effect = UnsupportedEtcdVersion('') self.assertRaises(UnsupportedEtcdVersion, self.etcd3.get_cluster) mock_urlopen.side_effect = SleepException() self.assertRaises(Etcd3Error, self.etcd3.get_cluster) def test__get_citus_cluster(self): self.etcd3._citus_group = '0' cluster = self.etcd3.get_cluster() self.assertIsInstance(cluster, Cluster) self.assertIsInstance(cluster.workers[1], Cluster) def test_touch_member(self): self.etcd3.touch_member({}) self.etcd3._lease = 'bla' self.etcd3.touch_member({}) with patch.object(PatroniEtcd3Client, 'lease_grant', Mock(side_effect=Etcd3ClientError)): self.etcd3.touch_member({}) def test__update_leader(self): leader = self.etcd3.get_cluster().leader self.etcd3._lease = None with patch.object(Etcd3Client, 'txn', Mock(return_value={'succeeded': True})): self.etcd3.update_leader(leader, '123', failsafe={'foo': 'bar'}) self.etcd3._last_lease_refresh = 0 self.etcd3.update_leader(leader, '124') with patch.object(PatroniEtcd3Client, 'lease_keepalive', Mock(return_value=True)), \ patch('time.time', Mock(side_effect=[0, 100, 200, 300])): self.assertRaises(Etcd3Error, self.etcd3.update_leader, leader, '126') self.etcd3._lease = leader.session self.etcd3.update_leader(leader, '124') self.etcd3._last_lease_refresh = 0 with patch.object(PatroniEtcd3Client, 'lease_keepalive', Mock(side_effect=Unknown)): self.assertFalse(self.etcd3.update_leader(leader, '125')) def test_take_leader(self): self.assertFalse(self.etcd3.take_leader()) def test_attempt_to_acquire_leader(self): self.assertFalse(self.etcd3.attempt_to_acquire_leader()) with patch('time.time', Mock(side_effect=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 200])): self.assertRaises(Etcd3Error, self.etcd3.attempt_to_acquire_leader) with patch('time.time', Mock(side_effect=[0, 100, 200, 300, 400])): self.assertRaises(Etcd3Error, 
self.etcd3.attempt_to_acquire_leader) with patch.object(PatroniEtcd3Client, 'put', Mock(return_value=False)): self.assertFalse(self.etcd3.attempt_to_acquire_leader()) def test_set_ttl(self): self.etcd3.set_ttl(20) @patch.object(PatroniEtcd3Client, 'lease_keepalive', Mock(return_value=False)) def test_refresh_lease(self): self.etcd3._last_lease_refresh = 0 self.etcd3.refresh_lease() @patch('time.sleep', Mock(side_effect=SleepException)) @patch.object(PatroniEtcd3Client, 'lease_keepalive', Mock(return_value=False)) @patch.object(PatroniEtcd3Client, 'lease_grant', Mock(side_effect=Etcd3ClientError)) def test_create_lease(self): self.etcd3._lease = None self.etcd3._last_lease_refresh = 0 self.assertRaises(SleepException, self.etcd3.create_lease) def test_set_failover_value(self): self.etcd3.set_failover_value('', 1) def test_set_config_value(self): self.etcd3.set_config_value('') def test_initialize(self): self.etcd3.initialize() def test_cancel_initialization(self): self.etcd3.cancel_initialization() def test_delete_leader(self): leader = self.etcd3.get_cluster().leader self.etcd3.delete_leader(leader) self.etcd3._name = 'other' self.etcd3.delete_leader(leader) def test_delete_cluster(self): self.etcd3.delete_cluster() def test_set_history_value(self): self.etcd3.set_history_value('') def test_set_sync_state_value(self): self.etcd3.set_sync_state_value('', 1) def test_delete_sync_state(self): self.etcd3.delete_sync_state('1') def test_watch(self): self.etcd3.set_ttl(10) self.etcd3.watch(None, 0) self.etcd3.watch('5', 0) def test_set_socket_options(self): with patch('socket.SIO_KEEPALIVE_VALS', 1, create=True): self.etcd3.set_socket_options(Mock(), None) patroni-3.2.2/tests/test_exhibitor.py000066400000000000000000000025671455170150700177610ustar00rootroot00000000000000import unittest import urllib3 from mock import Mock, patch from patroni.dcs.exhibitor import ExhibitorEnsembleProvider, Exhibitor from patroni.dcs.zookeeper import ZooKeeperError from . 
import SleepException, requests_get from .test_zookeeper import MockKazooClient @patch('patroni.dcs.exhibitor.requests_get', requests_get) @patch('time.sleep', Mock(side_effect=SleepException)) class TestExhibitorEnsembleProvider(unittest.TestCase): def test_init(self): self.assertRaises(SleepException, ExhibitorEnsembleProvider, ['localhost'], 8181) def test_poll(self): self.assertFalse(ExhibitorEnsembleProvider(['exhibitor'], 8181).poll()) class TestExhibitor(unittest.TestCase): @patch('urllib3.PoolManager.request', Mock(return_value=urllib3.HTTPResponse( status=200, body=b'{"servers":["127.0.0.1","127.0.0.2","127.0.0.3"],"port":2181}'))) @patch('patroni.dcs.zookeeper.PatroniKazooClient', MockKazooClient) def setUp(self): self.e = Exhibitor({'hosts': ['localhost', 'exhibitor'], 'port': 8181, 'scope': 'test', 'name': 'foo', 'ttl': 30, 'retry_timeout': 10}) @patch.object(ExhibitorEnsembleProvider, 'poll', Mock(return_value=True)) @patch.object(MockKazooClient, 'get_children', Mock(side_effect=Exception)) def test_get_cluster(self): self.assertRaises(ZooKeeperError, self.e.get_cluster) patroni-3.2.2/tests/test_file_perm.py000066400000000000000000000025151455170150700177170ustar00rootroot00000000000000import unittest import stat from mock import Mock, patch from patroni.file_perm import pg_perm class TestFilePermissions(unittest.TestCase): @patch('os.stat') @patch('os.umask') @patch('patroni.file_perm.logger.error') def test_set_umask(self, mock_logger, mock_umask, mock_stat): mock_umask.side_effect = Exception mock_stat.return_value.st_mode = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP pg_perm.set_permissions_from_data_directory('test') # umask is called with PG_MODE_MASK_GROUP self.assertEqual(mock_umask.call_args[0][0], stat.S_IWGRP | stat.S_IRWXO) self.assertEqual(mock_logger.call_args[0][0], 'Can not set umask to %03o: %r') mock_umask.reset_mock() mock_stat.return_value.st_mode = stat.S_IRWXU pg_perm.set_permissions_from_data_directory('test') # umask is called 
with PG_MODE_MASK_OWNER (permissions changed from group to owner) self.assertEqual(mock_umask.call_args[0][0], stat.S_IRWXG | stat.S_IRWXO) @patch('os.stat', Mock(side_effect=FileNotFoundError)) @patch('patroni.file_perm.logger.error') def test_set_permissions_from_data_directory(self, mock_logger): pg_perm.set_permissions_from_data_directory('test') self.assertEqual(mock_logger.call_args[0][0], 'Can not check permissions on %s: %r') patroni-3.2.2/tests/test_ha.py000066400000000000000000002631171455170150700163540ustar00rootroot00000000000000import datetime import etcd import os import sys from mock import Mock, MagicMock, PropertyMock, patch, mock_open from patroni.collections import CaseInsensitiveSet from patroni.config import Config from patroni.dcs import Cluster, ClusterConfig, Failover, Leader, Member, get_dcs, Status, SyncState, TimelineHistory from patroni.dcs.etcd import AbstractEtcdClientWithFailover from patroni.exceptions import DCSError, PostgresConnectionException, PatroniFatalException from patroni.ha import Ha, _MemberStatus from patroni.postgresql import Postgresql from patroni.postgresql.bootstrap import Bootstrap from patroni.postgresql.cancellable import CancellableSubprocess from patroni.postgresql.config import ConfigHandler from patroni.postgresql.postmaster import PostmasterProcess from patroni.postgresql.rewind import Rewind from patroni.postgresql.slots import SlotsHandler from patroni.utils import tzutc from patroni.watchdog import Watchdog from . 
import PostgresInit, MockPostmaster, psycopg_connect, requests_get from .test_etcd import socket_getaddrinfo, etcd_read, etcd_write SYSID = '12345678901' def true(*args, **kwargs): return True def false(*args, **kwargs): return False def get_cluster(initialize, leader, members, failover, sync, cluster_config=None, failsafe=None): t = datetime.datetime.now().isoformat() history = TimelineHistory(1, '[[1,67197376,"no recovery target specified","' + t + '","foo"]]', [(1, 67197376, 'no recovery target specified', t, 'foo')]) cluster_config = cluster_config or ClusterConfig(1, {'check_timeline': True}, 1) return Cluster(initialize, cluster_config, leader, Status(10, None), members, failover, sync, history, failsafe) def get_cluster_not_initialized_without_leader(cluster_config=None): return get_cluster(None, None, [], None, SyncState.empty(), cluster_config) def get_cluster_bootstrapping_without_leader(cluster_config=None): return get_cluster("", None, [], None, SyncState.empty(), cluster_config) def get_cluster_initialized_without_leader(leader=False, failover=None, sync=None, cluster_config=None, failsafe=False): m1 = Member(0, 'leader', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5435/postgres', 'api_url': 'http://127.0.0.1:8008/patroni', 'xlog_location': 4, 'role': 'primary', 'state': 'running'}) leader = Leader(0, 0, m1 if leader else Member(0, '', 28, {})) m2 = Member(0, 'other', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5436/postgres', 'api_url': 'http://127.0.0.1:8011/patroni', 'state': 'running', 'pause': True, 'tags': {'clonefrom': True}, 'scheduled_restart': {'schedule': "2100-01-01 10:53:07.560445+00:00", 'postgres_version': '99.0.0'}}) syncstate = SyncState(0 if sync else None, sync and sync[0], sync and sync[1]) failsafe = {m.name: m.api_url for m in (m1, m2)} if failsafe else None return get_cluster(SYSID, leader, [m1, m2], failover, syncstate, cluster_config, failsafe) def get_cluster_initialized_with_leader(failover=None, 
sync=None): return get_cluster_initialized_without_leader(leader=True, failover=failover, sync=sync) def get_cluster_initialized_with_only_leader(failover=None, cluster_config=None): leader = get_cluster_initialized_without_leader(leader=True, failover=failover).leader return get_cluster(True, leader, [leader.member], failover, SyncState.empty(), cluster_config) def get_standby_cluster_initialized_with_only_leader(failover=None, sync=None): return get_cluster_initialized_with_only_leader( cluster_config=ClusterConfig(1, { "standby_cluster": { "host": "localhost", "port": 5432, "primary_slot_name": "", }}, 1) ) def get_cluster_initialized_with_leader_and_failsafe(): return get_cluster_initialized_without_leader(leader=True, failsafe=True, cluster_config=ClusterConfig(1, {'failsafe_mode': True}, 1)) def get_node_status(reachable=True, in_recovery=True, dcs_last_seen=0, timeline=2, wal_position=10, nofailover=False, watchdog_failed=False, failover_priority=1): def fetch_node_status(e): tags = {} if nofailover: tags['nofailover'] = True tags['failover_priority'] = failover_priority return _MemberStatus(e, reachable, in_recovery, wal_position, {'tags': tags, 'watchdog_failed': watchdog_failed, 'dcs_last_seen': dcs_last_seen, 'timeline': timeline}) return fetch_node_status future_restart_time = datetime.datetime.now(tzutc) + datetime.timedelta(days=5) postmaster_start_time = datetime.datetime.now(tzutc) class MockPatroni(object): def __init__(self, p, d): os.environ[Config.PATRONI_CONFIG_VARIABLE] = """ restapi: listen: 0.0.0.0:8008 bootstrap: users: replicator: password: rep-pass options: - replication postgresql: name: foo data_dir: data/postgresql0 pg_rewind: username: postgres password: postgres watchdog: mode: off zookeeper: exhibitor: hosts: [localhost] port: 8181 """ # We rely on sys.argv in Config, so it's necessary to reset # all the extra values that are coming from py.test sys.argv = sys.argv[:1] self.config = Config(None) self.version = '1.5.7' 
self.postgresql = p self.dcs = d self.api = Mock() self.tags = {'foo': 'bar'} self.nofailover = None self.replicatefrom = None self.api.connection_string = 'http://127.0.0.1:8008' self.clonefrom = None self.nosync = False self.scheduled_restart = {'schedule': future_restart_time, 'postmaster_start_time': str(postmaster_start_time)} self.watchdog = Watchdog(self.config) self.request = lambda *args, **kwargs: requests_get(args[0].api_url, *args[1:], **kwargs) self.failover_priority = 1 def run_async(self, func, args=()): self.reset_scheduled_action() if args: func(*args) else: func() @patch.object(Postgresql, 'is_running', Mock(return_value=MockPostmaster())) @patch.object(Postgresql, 'is_primary', Mock(return_value=True)) @patch.object(Postgresql, 'timeline_wal_position', Mock(return_value=(1, 10, 1))) @patch.object(Postgresql, '_cluster_info_state_get', Mock(return_value=10)) @patch.object(Postgresql, 'slots', Mock(return_value={'l': 100})) @patch.object(Postgresql, 'data_directory_empty', Mock(return_value=False)) @patch.object(Postgresql, 'controldata', Mock(return_value={ 'Database system identifier': SYSID, 'Database cluster state': 'shut down', 'Latest checkpoint location': '0/12345678', "Latest checkpoint's TimeLineID": '2'})) @patch.object(SlotsHandler, 'load_replication_slots', Mock(side_effect=Exception)) @patch.object(ConfigHandler, 'append_pg_hba', Mock()) @patch.object(ConfigHandler, 'write_pgpass', Mock(return_value={})) @patch.object(ConfigHandler, 'write_recovery_conf', Mock()) @patch.object(ConfigHandler, 'write_postgresql_conf', Mock()) @patch.object(Postgresql, 'query', Mock()) @patch.object(Postgresql, 'checkpoint', Mock()) @patch.object(CancellableSubprocess, 'call', Mock(return_value=0)) @patch.object(Postgresql, 'get_replica_timeline', Mock(return_value=2)) @patch.object(Postgresql, 'get_primary_timeline', Mock(return_value=2)) @patch.object(Postgresql, 'get_major_version', Mock(return_value=140000)) @patch.object(Postgresql, 
'resume_wal_replay', Mock()) @patch.object(ConfigHandler, 'restore_configuration_files', Mock()) @patch.object(etcd.Client, 'write', etcd_write) @patch.object(etcd.Client, 'read', etcd_read) @patch.object(etcd.Client, 'delete', Mock(side_effect=etcd.EtcdException)) @patch('patroni.postgresql.polling_loop', Mock(return_value=range(1))) @patch('patroni.async_executor.AsyncExecutor.busy', PropertyMock(return_value=False)) @patch('patroni.async_executor.AsyncExecutor.run_async', run_async) @patch('patroni.postgresql.rewind.Thread', Mock()) @patch('patroni.postgresql.citus.CitusHandler.start', Mock()) @patch('subprocess.call', Mock(return_value=0)) @patch('time.sleep', Mock()) class TestHa(PostgresInit): @patch('socket.getaddrinfo', socket_getaddrinfo) @patch('patroni.dcs.dcs_modules', Mock(return_value=['patroni.dcs.etcd'])) @patch.object(etcd.Client, 'read', etcd_read) @patch.object(AbstractEtcdClientWithFailover, '_get_machines_list', Mock(return_value=['http://remotehost:2379'])) def setUp(self): super(TestHa, self).setUp() self.p.set_state('running') self.p.set_role('replica') self.p.postmaster_start_time = MagicMock(return_value=str(postmaster_start_time)) self.p.can_create_replica_without_replication_connection = MagicMock(return_value=False) self.e = get_dcs({'etcd': {'ttl': 30, 'host': 'ok:2379', 'scope': 'test', 'name': 'foo', 'retry_timeout': 10}, 'citus': {'database': 'citus', 'group': None}}) self.ha = Ha(MockPatroni(self.p, self.e)) self.ha.old_cluster = self.e.get_cluster() self.ha.cluster = get_cluster_initialized_without_leader() self.ha.load_cluster_from_dcs = Mock() def test_update_lock(self): self.ha.is_failsafe_mode = true self.p.last_operation = Mock(side_effect=PostgresConnectionException('')) self.ha.dcs.update_leader = Mock(side_effect=[DCSError(''), Exception]) self.assertRaises(DCSError, self.ha.update_lock) self.assertFalse(self.ha.update_lock(True)) @patch.object(Postgresql, 'received_timeline', Mock(return_value=None)) def 
test_touch_member(self): self.p._major_version = 110000 self.p.is_primary = false self.p.timeline_wal_position = Mock(return_value=(0, 1, 0)) self.p.replica_cached_timeline = Mock(side_effect=Exception) with patch.object(Postgresql, '_cluster_info_state_get', Mock(return_value='streaming')): self.ha.touch_member() self.p.timeline_wal_position = Mock(return_value=(0, 1, 1)) self.p.set_role('standby_leader') self.ha.touch_member() self.p.set_role('primary') self.ha.dcs.touch_member = true self.ha.touch_member() def test_is_leader(self): self.assertFalse(self.ha.is_leader()) def test_start_as_replica(self): self.p.is_healthy = false self.assertEqual(self.ha.run_cycle(), 'starting as a secondary') @patch('patroni.dcs.etcd.Etcd.initialize', return_value=True) def test_bootstrap_as_standby_leader(self, initialize): self.p.data_directory_empty = true self.ha.cluster = get_cluster_not_initialized_without_leader(cluster_config=ClusterConfig(0, {}, 0)) self.ha.patroni.config._dynamic_configuration = {"standby_cluster": {"port": 5432}} self.assertEqual(self.ha.run_cycle(), 'trying to bootstrap a new standby leader') def test_bootstrap_waiting_for_standby_leader(self): self.p.data_directory_empty = true self.ha.cluster = get_cluster_initialized_without_leader() self.ha.cluster.config.data.update({'standby_cluster': {'port': 5432}}) self.assertEqual(self.ha.run_cycle(), 'waiting for standby_leader to bootstrap') @patch.object(Cluster, 'get_clone_member', Mock(return_value=Member(0, 'test', 1, {'api_url': 'http://127.0.0.1:8011/patroni', 'conn_url': 'postgres://127.0.0.1:5432/postgres'}))) @patch.object(Bootstrap, 'create_replica', Mock(return_value=0)) def test_start_as_cascade_replica_in_standby_cluster(self): self.p.data_directory_empty = true self.ha.cluster = get_standby_cluster_initialized_with_only_leader() self.assertEqual(self.ha.run_cycle(), "trying to bootstrap from replica 'test'") def test_recover_replica_failed(self): self.p.controldata = lambda: {'Database cluster 
state': 'in recovery', 'Database system identifier': SYSID} self.p.is_running = false self.p.follow = false self.assertEqual(self.ha.run_cycle(), 'starting as a secondary') self.assertEqual(self.ha.run_cycle(), 'failed to start postgres') def test_recover_raft(self): self.p.controldata = lambda: {'Database cluster state': 'in recovery', 'Database system identifier': SYSID} self.p.is_running = false self.p.follow = true self.assertEqual(self.ha.run_cycle(), 'starting as a secondary') self.p.is_running = true ha_dcs_orig_name = self.ha.dcs.__class__.__name__ self.ha.dcs.__class__.__name__ = 'Raft' self.assertEqual(self.ha.run_cycle(), 'started as a secondary') self.ha.dcs.__class__.__name__ = ha_dcs_orig_name def test_recover_former_primary(self): self.p.follow = false self.p.is_running = false self.p.name = 'leader' self.p.set_role('demoted') self.p.controldata = lambda: {'Database cluster state': 'shut down', 'Database system identifier': SYSID} self.ha.cluster = get_cluster_initialized_with_leader() self.assertEqual(self.ha.run_cycle(), 'starting as readonly because i had the session lock') def test_start_primary_after_failure(self): self.p.start = false self.p.is_running = false self.p.name = 'leader' self.p.set_role('primary') self.p.controldata = lambda: {'Database cluster state': 'in production', 'Database system identifier': SYSID} self.ha.cluster = get_cluster_initialized_with_leader() self.assertEqual(self.ha.run_cycle(), 'starting primary after failure') @patch.object(Rewind, 'ensure_clean_shutdown', Mock()) def test_crash_recovery(self): self.ha.has_lock = true self.p.is_running = false self.p.controldata = lambda: {'Database cluster state': 'in production', 'Database system identifier': SYSID} self.assertEqual(self.ha.run_cycle(), 'doing crash recovery in a single user mode') with patch('patroni.async_executor.AsyncExecutor.busy', PropertyMock(return_value=True)), \ patch.object(Ha, 'check_timeline', Mock(return_value=False)): 
self.ha._async_executor.schedule('doing crash recovery in a single user mode') self.ha.state_handler.cancellable._process = Mock() self.ha._crash_recovery_started -= 600 self.ha.cluster.config.data.update({'maximum_lag_on_failover': 10}) self.ha.global_config = self.ha.patroni.config.get_global_config(self.ha.cluster) self.assertEqual(self.ha.run_cycle(), 'terminated crash recovery because of startup timeout') @patch.object(Rewind, 'ensure_clean_shutdown', Mock()) @patch.object(Rewind, 'rewind_or_reinitialize_needed_and_possible', Mock(return_value=True)) @patch.object(Rewind, 'can_rewind', PropertyMock(return_value=True)) def test_crash_recovery_before_rewind(self): self.p.is_primary = false self.p.is_running = false self.p.controldata = lambda: {'Database cluster state': 'in archive recovery', 'Database system identifier': SYSID} self.ha._rewind.trigger_check_diverged_lsn() self.ha.cluster = get_cluster_initialized_with_leader() self.assertEqual(self.ha.run_cycle(), 'doing crash recovery in a single user mode') @patch.object(Rewind, 'rewind_or_reinitialize_needed_and_possible', Mock(return_value=True)) @patch.object(Rewind, 'can_rewind', PropertyMock(return_value=True)) @patch('os.listdir', Mock(return_value=[])) @patch('patroni.postgresql.rewind.fsync_dir', Mock()) def test_recover_with_rewind(self): self.p.is_running = false self.ha.cluster = get_cluster_initialized_with_leader() self.ha.cluster.leader.member.data.update(version='2.0.2', role='primary') self.ha._rewind.pg_rewind = true self.ha._rewind.check_leader_is_not_in_recovery = true with patch.object(Rewind, 'rewind_or_reinitialize_needed_and_possible', Mock(return_value=True)): self.assertEqual(self.ha.run_cycle(), 'running pg_rewind from leader') with patch.object(Rewind, 'rewind_or_reinitialize_needed_and_possible', Mock(return_value=False)), \ patch.object(Ha, 'is_synchronous_mode', Mock(return_value=True)): self.p.follow = true self.assertEqual(self.ha.run_cycle(), 'starting as a secondary') 
self.p.is_running = true self.ha.follow = Mock(return_value='fake') self.assertEqual(self.ha.run_cycle(), 'fake') @patch.object(Rewind, 'rewind_or_reinitialize_needed_and_possible', Mock(return_value=True)) @patch.object(Rewind, 'should_remove_data_directory_on_diverged_timelines', PropertyMock(return_value=True)) @patch.object(Bootstrap, 'create_replica', Mock(return_value=1)) def test_recover_with_reinitialize(self): self.p.is_running = false self.ha.cluster = get_cluster_initialized_with_leader() self.assertEqual(self.ha.run_cycle(), 'reinitializing due to diverged timelines') @patch('sys.exit', return_value=1) @patch('patroni.ha.Ha.sysid_valid', MagicMock(return_value=True)) def test_sysid_no_match(self, exit_mock): self.p.controldata = lambda: {'Database cluster state': 'in recovery', 'Database system identifier': '123'} self.ha.run_cycle() exit_mock.assert_called_once_with(1) @patch.object(Cluster, 'is_unlocked', Mock(return_value=False)) def test_start_as_readonly(self): self.p.is_primary = false self.p.is_healthy = true self.ha.has_lock = true self.p.controldata = lambda: {'Database cluster state': 'in production', 'Database system identifier': SYSID} self.assertEqual(self.ha.run_cycle(), 'promoted self to leader because I had the session lock') @patch('patroni.psycopg.connect', psycopg_connect) def test_acquire_lock_as_primary(self): self.assertEqual(self.ha.run_cycle(), 'acquired session lock as a leader') def test_leader_race_stale_primary(self): with patch.object(Postgresql, 'get_primary_timeline', Mock(return_value=1)), \ patch('patroni.ha.logger.warning') as mock_logger: self.assertEqual(self.ha.run_cycle(), 'demoting self because i am not the healthiest node') self.assertEqual(mock_logger.call_args[0][0], 'My timeline %s is behind last known cluster timeline %s') def test_promoted_by_acquiring_lock(self): self.ha.is_healthiest_node = true self.p.is_primary = false self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session 
lock') def test_promotion_cancelled_after_pre_promote_failed(self): self.p.is_primary = false self.p._pre_promote = false self.ha._is_healthiest_node = true self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') self.assertEqual(self.ha.run_cycle(), 'Promotion cancelled because the pre-promote script failed') self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node') def test_lost_leader_lock_during_promote(self): with patch('patroni.async_executor.AsyncExecutor.busy', PropertyMock(return_value=True)): self.ha._async_executor.schedule('promote') self.assertEqual(self.ha.run_cycle(), 'lost leader before promote') @patch.object(Cluster, 'is_unlocked', Mock(return_value=False)) def test_long_promote(self): self.ha.has_lock = true self.p.is_primary = false self.p.set_role('primary') self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), the leader with the lock') def test_demote_after_failing_to_obtain_lock(self): self.ha.acquire_lock = false self.assertEqual(self.ha.run_cycle(), 'demoted self after trying and failing to obtain lock') def test_follow_new_leader_after_failing_to_obtain_lock(self): self.ha.is_healthiest_node = true self.ha.acquire_lock = false self.p.is_primary = false self.assertEqual(self.ha.run_cycle(), 'following new leader after trying and failing to obtain lock') def test_demote_because_not_healthiest(self): self.ha.is_healthiest_node = false self.assertEqual(self.ha.run_cycle(), 'demoting self because i am not the healthiest node') def test_follow_new_leader_because_not_healthiest(self): self.ha.is_healthiest_node = false self.p.is_primary = false self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node') @patch.object(Cluster, 'is_unlocked', Mock(return_value=False)) def test_promote_because_have_lock(self): self.ha.has_lock = true self.p.is_primary = false self.assertEqual(self.ha.run_cycle(), 
'promoted self to leader because I had the session lock') def test_promote_without_watchdog(self): self.ha.has_lock = true self.p.is_primary = true with patch.object(Watchdog, 'activate', Mock(return_value=False)): self.assertEqual(self.ha.run_cycle(), 'Demoting self because watchdog could not be activated') self.p.is_primary = false self.assertEqual(self.ha.run_cycle(), 'Not promoting self because watchdog could not be activated') def test_leader_with_lock(self): self.ha.cluster = get_cluster_initialized_with_leader() self.ha.has_lock = true self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), the leader with the lock') def test_coordinator_leader_with_lock(self): self.ha.cluster = get_cluster_initialized_with_leader() self.ha.has_lock = true self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), the leader with the lock') @patch.object(Postgresql, '_wait_for_connection_close', Mock()) @patch.object(Cluster, 'is_unlocked', Mock(return_value=False)) def test_demote_because_not_having_lock(self): with patch.object(Watchdog, 'is_running', PropertyMock(return_value=True)): self.assertEqual(self.ha.run_cycle(), 'demoting self because I do not have the lock and I was a leader') @patch.object(Cluster, 'is_unlocked', Mock(return_value=False)) def test_demote_because_update_lock_failed(self): self.ha.has_lock = true self.ha.update_lock = false self.assertEqual(self.ha.run_cycle(), 'demoted self because failed to update leader lock in DCS') with patch.object(Ha, '_get_node_to_follow', Mock(side_effect=DCSError('foo'))): self.assertEqual(self.ha.run_cycle(), 'demoted self because failed to update leader lock in DCS') self.p.is_primary = false self.assertEqual(self.ha.run_cycle(), 'not promoting because failed to update leader lock in DCS') @patch.object(Cluster, 'is_unlocked', Mock(return_value=False)) def test_follow(self): self.p.is_primary = false self.assertEqual(self.ha.run_cycle(), 'no action. 
I am (postgresql0), a secondary, and following a leader ()') self.ha.patroni.replicatefrom = "foo" self.p.config.check_recovery_conf = Mock(return_value=(True, False)) self.ha.cluster.config.data.update({'slots': {'l': {'database': 'a', 'plugin': 'b'}}}) self.ha.cluster.members[1].data['tags']['replicatefrom'] = 'postgresql0' self.ha.patroni.nofailover = True self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), a secondary, and following a leader ()') del self.ha.cluster.config.data['slots'] self.ha.cluster.config.data.update({'postgresql': {'use_slots': False}}) self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), a secondary, and following a leader ()') del self.ha.cluster.config.data['postgresql']['use_slots'] @patch.object(Cluster, 'is_unlocked', Mock(return_value=False)) def test_follow_in_pause(self): self.ha.is_paused = true self.assertEqual(self.ha.run_cycle(), 'PAUSE: continue to run as primary without lock') self.p.is_primary = false self.assertEqual(self.ha.run_cycle(), 'PAUSE: no action. 
I am (postgresql0)') @patch.object(Rewind, 'rewind_or_reinitialize_needed_and_possible', Mock(return_value=True)) @patch.object(Rewind, 'can_rewind', PropertyMock(return_value=True)) def test_follow_triggers_rewind(self): self.p.is_primary = false self.ha._rewind.trigger_check_diverged_lsn() self.ha.cluster = get_cluster_initialized_with_leader() self.assertEqual(self.ha.run_cycle(), 'running pg_rewind from leader') def test_no_dcs_connection_primary_demote(self): self.ha.load_cluster_from_dcs = Mock(side_effect=DCSError('Etcd is not responding properly')) self.assertEqual(self.ha.run_cycle(), 'demoting self because DCS is not accessible and I was a leader') self.ha._async_executor.schedule('dummy') self.assertEqual(self.ha.run_cycle(), 'demoted self because DCS is not accessible and I was a leader') def test_check_failsafe_topology(self): self.ha.load_cluster_from_dcs = Mock(side_effect=DCSError('Etcd is not responding properly')) self.ha.cluster = get_cluster_initialized_with_leader_and_failsafe() self.ha.global_config = self.ha.patroni.config.get_global_config(self.ha.cluster) self.ha.dcs._last_failsafe = self.ha.cluster.failsafe self.assertEqual(self.ha.run_cycle(), 'demoting self because DCS is not accessible and I was a leader') self.ha.state_handler.name = self.ha.cluster.leader.name self.assertFalse(self.ha.failsafe_is_active()) self.assertEqual(self.ha.run_cycle(), 'continue to run as a leader because failsafe mode is enabled and all members are accessible') self.assertTrue(self.ha.failsafe_is_active()) with patch.object(Postgresql, 'slots', Mock(side_effect=Exception)): self.ha.patroni.request = Mock(side_effect=Exception) self.assertEqual(self.ha.run_cycle(), 'demoting self because DCS is not accessible and I was a leader') self.assertFalse(self.ha.failsafe_is_active()) self.ha.dcs._last_failsafe.clear() self.ha.dcs._last_failsafe[self.ha.cluster.leader.name] = self.ha.cluster.leader.member.api_url self.assertEqual(self.ha.run_cycle(), 'continue to run 
as a leader because failsafe mode is enabled and all members are accessible') def test_no_dcs_connection_primary_failsafe(self): self.ha.load_cluster_from_dcs = Mock(side_effect=DCSError('Etcd is not responding properly')) self.ha.cluster = get_cluster_initialized_with_leader_and_failsafe() self.ha.global_config = self.ha.patroni.config.get_global_config(self.ha.cluster) self.ha.dcs._last_failsafe = self.ha.cluster.failsafe self.ha.state_handler.name = self.ha.cluster.leader.name self.assertEqual(self.ha.run_cycle(), 'continue to run as a leader because failsafe mode is enabled and all members are accessible') def test_readonly_dcs_primary_failsafe(self): self.ha.cluster = get_cluster_initialized_with_leader_and_failsafe() self.ha.dcs.update_leader = Mock(side_effect=DCSError('Etcd is not responding properly')) self.ha.dcs._last_failsafe = self.ha.cluster.failsafe self.ha.state_handler.name = self.ha.cluster.leader.name self.assertEqual(self.ha.run_cycle(), 'continue to run as a leader because failsafe mode is enabled and all members are accessible') def test_no_dcs_connection_replica_failsafe(self): self.ha.load_cluster_from_dcs = Mock(side_effect=DCSError('Etcd is not responding properly')) self.ha.cluster = get_cluster_initialized_with_leader_and_failsafe() self.ha.global_config = self.ha.patroni.config.get_global_config(self.ha.cluster) self.ha.update_failsafe({'name': 'leader', 'api_url': 'http://127.0.0.1:8008/patroni', 'conn_url': 'postgres://127.0.0.1:5432/postgres', 'slots': {'foo': 1000}}) self.p.is_primary = false self.assertEqual(self.ha.run_cycle(), 'DCS is not accessible') def test_no_dcs_connection_replica_failsafe_not_enabled_but_active(self): self.ha.load_cluster_from_dcs = Mock(side_effect=DCSError('Etcd is not responding properly')) self.ha.cluster = get_cluster_initialized_with_leader() self.ha.update_failsafe({'name': 'leader', 'api_url': 'http://127.0.0.1:8008/patroni', 'conn_url': 'postgres://127.0.0.1:5432/postgres', 'slots': {'foo': 1000}}) 
self.p.is_primary = false self.assertEqual(self.ha.run_cycle(), 'DCS is not accessible') def test_update_failsafe(self): self.assertRaises(Exception, self.ha.update_failsafe, {}) self.p.set_role('primary') self.assertEqual(self.ha.update_failsafe({}), 'Running as a leader') @patch('time.sleep', Mock()) def test_bootstrap_from_another_member(self): self.ha.cluster = get_cluster_initialized_with_leader() self.assertEqual(self.ha.bootstrap(), 'trying to bootstrap from replica \'other\'') def test_bootstrap_waiting_for_leader(self): self.ha.cluster = get_cluster_initialized_without_leader() self.assertEqual(self.ha.bootstrap(), 'waiting for leader to bootstrap') def test_bootstrap_without_leader(self): self.ha.cluster = get_cluster_initialized_without_leader() self.p.can_create_replica_without_replication_connection = MagicMock(return_value=True) self.assertEqual(self.ha.bootstrap(), 'trying to bootstrap (without leader)') def test_bootstrap_not_running_concurrently(self): self.ha.cluster = get_cluster_bootstrapping_without_leader() self.p.can_create_replica_without_replication_connection = MagicMock(return_value=True) self.assertEqual(self.ha.bootstrap(), 'waiting for leader to bootstrap') def test_bootstrap_initialize_lock_failed(self): self.ha.cluster = get_cluster_not_initialized_without_leader() self.assertEqual(self.ha.bootstrap(), 'failed to acquire initialize lock') @patch('patroni.psycopg.connect', psycopg_connect) @patch('patroni.postgresql.citus.connect', psycopg_connect) @patch('patroni.postgresql.citus.quote_ident', Mock()) @patch.object(Postgresql, 'connection', Mock(return_value=None)) def test_bootstrap_initialized_new_cluster(self): self.ha.cluster = get_cluster_not_initialized_without_leader() self.e.initialize = true self.assertEqual(self.ha.bootstrap(), 'trying to bootstrap a new cluster') self.p.is_primary = false self.assertEqual(self.ha.run_cycle(), 'waiting for end of recovery after bootstrap') self.p.is_primary = true 
self.ha.is_synchronous_mode = true self.assertEqual(self.ha.run_cycle(), 'running post_bootstrap') self.assertEqual(self.ha.run_cycle(), 'initialized a new cluster') def test_bootstrap_release_initialize_key_on_failure(self): self.ha.cluster = get_cluster_not_initialized_without_leader() self.e.initialize = true self.ha.bootstrap() self.p.is_running = false self.assertRaises(PatroniFatalException, self.ha.post_bootstrap) @patch('patroni.psycopg.connect', psycopg_connect) @patch('patroni.postgresql.citus.connect', psycopg_connect) @patch('patroni.postgresql.citus.quote_ident', Mock()) @patch.object(Postgresql, 'connection', Mock(return_value=None)) def test_bootstrap_release_initialize_key_on_watchdog_failure(self): self.ha.cluster = get_cluster_not_initialized_without_leader() self.e.initialize = true self.ha.bootstrap() self.p.is_primary = true with patch.object(Watchdog, 'activate', Mock(return_value=False)), \ patch('patroni.ha.logger.error') as mock_logger: self.assertEqual(self.ha.post_bootstrap(), 'running post_bootstrap') self.assertRaises(PatroniFatalException, self.ha.post_bootstrap) self.assertTrue(mock_logger.call_args[0][0].startswith('Cancelling bootstrap because' ' watchdog activation failed')) @patch('patroni.psycopg.connect', psycopg_connect) def test_reinitialize(self): self.assertIsNotNone(self.ha.reinitialize()) self.ha.cluster = get_cluster_initialized_with_leader() self.assertIsNone(self.ha.reinitialize(True)) self.ha._async_executor.schedule('reinitialize') self.assertIsNotNone(self.ha.reinitialize()) self.ha.state_handler.name = self.ha.cluster.leader.name self.assertIsNotNone(self.ha.reinitialize()) @patch('time.sleep', Mock()) def test_restart(self): self.assertEqual(self.ha.restart({}), (True, 'restarted successfully')) self.p.restart = Mock(return_value=None) self.assertEqual(self.ha.restart({}), (False, 'postgres is still starting')) self.p.restart = false self.assertEqual(self.ha.restart({}), (False, 'restart failed')) self.ha.cluster = 
get_cluster_initialized_with_leader() self.ha._async_executor.schedule('reinitialize') self.assertEqual(self.ha.restart({}), (False, 'reinitialize already in progress')) with patch.object(self.ha, "restart_matches", return_value=False): self.assertEqual(self.ha.restart({'foo': 'bar'}), (False, "restart conditions are not satisfied")) @patch('time.sleep', Mock()) @patch.object(ConfigHandler, 'replace_pg_hba', Mock()) @patch.object(ConfigHandler, 'replace_pg_ident', Mock()) @patch.object(PostmasterProcess, 'start', Mock(return_value=MockPostmaster())) @patch('patroni.postgresql.citus.CitusHandler.is_coordinator', Mock(return_value=False)) def test_worker_restart(self): self.ha.has_lock = true self.ha.patroni.request = Mock() self.p.is_running = Mock(side_effect=[Mock(), False]) self.assertEqual(self.ha.restart({}), (True, 'restarted successfully')) self.ha.patroni.request.assert_called() self.assertEqual(self.ha.patroni.request.call_args_list[0][0][3]['type'], 'before_demote') self.assertEqual(self.ha.patroni.request.call_args_list[1][0][3]['type'], 'after_promote') @patch('os.kill', Mock()) def test_restart_in_progress(self): with patch('patroni.async_executor.AsyncExecutor.busy', PropertyMock(return_value=True)): self.ha._async_executor.schedule('restart') self.assertTrue(self.ha.restart_scheduled()) self.assertEqual(self.ha.run_cycle(), 'restart in progress') self.ha.cluster = get_cluster_initialized_with_leader() self.assertEqual(self.ha.run_cycle(), 'restart in progress') self.ha.has_lock = true self.assertEqual(self.ha.run_cycle(), 'updated leader lock during restart') self.ha.update_lock = false self.p.set_role('primary') with patch('patroni.async_executor.CriticalTask.cancel', Mock(return_value=False)), \ patch('patroni.async_executor.CriticalTask.result', PropertyMock(return_value=PostmasterProcess(os.getpid())), create=True), \ patch('patroni.postgresql.Postgresql.terminate_starting_postmaster') as mock_terminate: self.assertEqual(self.ha.run_cycle(), 'lost 
leader lock during restart') mock_terminate.assert_called() self.ha.is_paused = true self.assertEqual(self.ha.run_cycle(), 'PAUSE: restart in progress') @patch('patroni.postgresql.citus.CitusHandler.is_coordinator', Mock(return_value=False)) def test_manual_failover_from_leader(self): self.ha.has_lock = true # I am the leader # to me with patch('patroni.ha.logger.warning') as mock_warning: self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, '', self.p.name, None)) self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), the leader with the lock') mock_warning.assert_called_with('%s: I am already the leader, no need to %s', 'manual failover', 'failover') # to a non-existent candidate with patch('patroni.ha.logger.warning') as mock_warning: self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, '', 'blabla', None)) self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), the leader with the lock') mock_warning.assert_called_with( '%s: no healthy members found, %s is not possible', 'manual failover', 'failover') # to an existent candidate self.ha.fetch_node_status = get_node_status() self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, '', 'b', None)) self.ha.cluster.members.append(Member(0, 'b', 28, {'api_url': 'http://127.0.0.1:8011/patroni'})) self.assertEqual(self.ha.run_cycle(), 'manual failover: demoting myself') # to a candidate on an older timeline with patch('patroni.ha.logger.info') as mock_info: self.ha.fetch_node_status = get_node_status(timeline=1) self.assertEqual(self.ha.run_cycle(), 'no action. 
I am (postgresql0), the leader with the lock') self.assertEqual(mock_info.call_args_list[0][0], ('Timeline %s of member %s is behind the cluster timeline %s', 1, 'b', 2)) # to a lagging candidate with patch('patroni.ha.logger.info') as mock_info: self.ha.fetch_node_status = get_node_status(wal_position=1) self.ha.cluster.config.data.update({'maximum_lag_on_failover': 5}) self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), the leader with the lock') self.assertEqual(mock_info.call_args_list[0][0], ('Member %s exceeds maximum replication lag', 'b')) self.ha.cluster.members.pop() @patch('patroni.postgresql.citus.CitusHandler.is_coordinator', Mock(return_value=False)) def test_manual_switchover_from_leader(self): self.ha.has_lock = true # I am the leader self.ha.fetch_node_status = get_node_status() # different leader specified in failover key, no candidate with patch('patroni.ha.logger.warning') as mock_warning: self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', '', None)) self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), the leader with the lock') mock_warning.assert_called_with( '%s: leader name does not match: %s != %s', 'switchover', 'blabla', 'postgresql0') # no candidate self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, self.p.name, '', None)) self.assertEqual(self.ha.run_cycle(), 'switchover: demoting myself') self.ha._rewind.rewind_or_reinitialize_needed_and_possible = true self.assertEqual(self.ha.run_cycle(), 'switchover: demoting myself') # other members with failover_limitation_s with patch('patroni.ha.logger.info') as mock_info: self.ha.fetch_node_status = get_node_status(nofailover=True) self.assertEqual(self.ha.run_cycle(), 'no action. 
I am (postgresql0), the leader with the lock') self.assertEqual(mock_info.call_args_list[0][0], ('Member %s is %s', 'leader', 'not allowed to promote')) with patch('patroni.ha.logger.info') as mock_info: self.ha.fetch_node_status = get_node_status(watchdog_failed=True) self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), the leader with the lock') self.assertEqual(mock_info.call_args_list[0][0], ('Member %s is %s', 'leader', 'not watchdog capable')) with patch('patroni.ha.logger.info') as mock_info: self.ha.fetch_node_status = get_node_status(timeline=1) self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), the leader with the lock') self.assertEqual(mock_info.call_args_list[0][0], ('Timeline %s of member %s is behind the cluster timeline %s', 1, 'leader', 2)) with patch('patroni.ha.logger.info') as mock_info: self.ha.fetch_node_status = get_node_status(wal_position=1) self.ha.cluster.config.data.update({'maximum_lag_on_failover': 5}) self.ha.global_config = self.ha.patroni.config.get_global_config(self.ha.cluster) self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), the leader with the lock') self.assertEqual(mock_info.call_args_list[0][0], ('Member %s exceeds maximum replication lag', 'leader')) @patch('patroni.postgresql.citus.CitusHandler.is_coordinator', Mock(return_value=False)) def test_scheduled_switchover_from_leader(self): self.ha.has_lock = true # I am the leader self.ha.fetch_node_status = get_node_status() # switchover scheduled time must include timezone with patch('patroni.ha.logger.warning') as mock_warning: scheduled = datetime.datetime.now() self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, self.p.name, 'blabla', scheduled)) self.assertEqual(self.ha.run_cycle(), 'no action. 
I am (postgresql0), the leader with the lock') self.assertIn('Incorrect value of scheduled_at: %s', mock_warning.call_args_list[0][0]) # scheduled now scheduled = datetime.datetime.utcnow().replace(tzinfo=tzutc) self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, self.p.name, 'b', scheduled)) self.ha.cluster.members.append(Member(0, 'b', 28, {'api_url': 'http://127.0.0.1:8011/patroni'})) self.assertEqual('switchover: demoting myself', self.ha.run_cycle()) # scheduled in the future with patch('patroni.ha.logger.info') as mock_info: scheduled = scheduled + datetime.timedelta(seconds=30) self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, self.p.name, 'blabla', scheduled)) self.assertEqual('no action. I am (postgresql0), the leader with the lock', self.ha.run_cycle()) self.assertIn('Awaiting %s at %s (in %.0f seconds)', mock_info.call_args_list[0][0]) # stale value with patch('patroni.ha.logger.warning') as mock_warning: scheduled = scheduled + datetime.timedelta(seconds=-600) self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, self.p.name, 'b', scheduled)) self.ha.cluster.members.append(Member(0, 'b', 28, {'api_url': 'http://127.0.0.1:8011/patroni'})) self.assertEqual('no action. I am (postgresql0), the leader with the lock', self.ha.run_cycle()) self.assertIn('Found a stale %s value, cleaning up: %s', mock_warning.call_args_list[0][0]) def test_manual_switchover_from_leader_in_pause(self): self.ha.has_lock = true # I am the leader self.ha.is_paused = true # no candidate self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, self.p.name, '', None)) with patch('patroni.ha.logger.warning') as mock_warning: self.assertEqual('PAUSE: no action. 
I am (postgresql0), the leader with the lock', self.ha.run_cycle()) mock_warning.assert_called_with( '%s is possible only to a specific candidate in a paused state', 'Switchover') def test_manual_failover_from_leader_in_pause(self): self.ha.has_lock = true self.ha.fetch_node_status = get_node_status() self.ha.is_paused = true # failover from me, candidate is healthy self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, None, 'b', None)) self.ha.cluster.members.append(Member(0, 'b', 28, {'api_url': 'http://127.0.0.1:8011/patroni'})) self.assertEqual('PAUSE: manual failover: demoting myself', self.ha.run_cycle()) self.ha.cluster.members.pop() def test_manual_failover_from_leader_in_synchronous_mode(self): self.ha.is_synchronous_mode = true self.ha.process_sync_replication = Mock() self.ha.fetch_node_status = get_node_status() # I am the leader self.p.is_primary = true self.ha.has_lock = true # the candidate is not in sync members but we allow failover to an async candidate self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, None, 'b', None), sync=(self.p.name, 'a')) self.ha.cluster.members.append(Member(0, 'b', 28, {'api_url': 'http://127.0.0.1:8011/patroni'})) self.assertEqual('manual failover: demoting myself', self.ha.run_cycle()) self.ha.cluster.members.pop() def test_manual_switchover_from_leader_in_synchronous_mode(self): self.ha.is_synchronous_mode = true self.ha.process_sync_replication = Mock() # I am the leader self.p.is_primary = true self.ha.has_lock = true # candidate specified is not in sync members with patch('patroni.ha.logger.warning') as mock_warning: self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, self.p.name, 'a', None), sync=(self.p.name, 'blabla')) self.assertEqual('no action. 
I am (postgresql0), the leader with the lock', self.ha.run_cycle()) self.assertEqual(mock_warning.call_args_list[0][0], ('%s candidate=%s does not match with sync_standbys=%s', 'Switchover', 'a', 'blabla')) # the candidate is in sync members and is healthy self.ha.fetch_node_status = get_node_status(wal_position=305419896) self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, self.p.name, 'a', None), sync=(self.p.name, 'a')) self.ha.cluster.members.append(Member(0, 'a', 28, {'api_url': 'http://127.0.0.1:8011/patroni'})) self.assertEqual('switchover: demoting myself', self.ha.run_cycle()) # the candidate is in sync members but is not healthy with patch('patroni.ha.logger.info') as mock_info: self.ha.fetch_node_status = get_node_status(nofailover=true) self.assertEqual('no action. I am (postgresql0), the leader with the lock', self.ha.run_cycle()) self.assertEqual(mock_info.call_args_list[0][0], ('Member %s is %s', 'a', 'not allowed to promote')) def test_manual_failover_process_no_leader(self): self.p.is_primary = false self.p.set_role('replica') # failover to another member, fetch_node_status for candidate fails with patch('patroni.ha.logger.warning') as mock_warning: self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'leader', None)) self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') self.assertEqual(mock_warning.call_args_list[1][0], ('%s: member %s is %s', 'manual failover', 'leader', 'not reachable')) # failover to another member, candidate is accessible, in_recovery self.p.set_role('replica') self.ha.fetch_node_status = get_node_status() self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node') # set nofailover flag to True for all members of the cluster # this should elect the current member, as we are not going to call the API for it. 
self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'other', None)) self.ha.fetch_node_status = get_node_status(nofailover=True) self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') # failover to me but I am set to nofailover. In no case I should be elected as a leader self.p.set_role('replica') self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'postgresql0', None)) self.ha.patroni.nofailover = True self.assertEqual(self.ha.run_cycle(), 'following a different leader because I am not allowed to promote') self.ha.patroni.nofailover = False # failover to another member that is on an older timeline (only failover_limitation() is checked) with patch('patroni.ha.logger.info') as mock_info: self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'b', None)) self.ha.cluster.members.append(Member(0, 'b', 28, {'api_url': 'http://127.0.0.1:8011/patroni'})) self.ha.fetch_node_status = get_node_status(timeline=1) self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node') mock_info.assert_called_with('%s: to %s, i am %s', 'manual failover', 'b', 'postgresql0') # failover to another member lagging behind the cluster_lsn (only failover_limitation() is checked) with patch('patroni.ha.logger.info') as mock_info: self.ha.cluster.config.data.update({'maximum_lag_on_failover': 5}) self.ha.fetch_node_status = get_node_status(wal_position=1) self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node') mock_info.assert_called_with('%s: to %s, i am %s', 'manual failover', 'b', 'postgresql0') def test_manual_switchover_process_no_leader(self): self.p.is_primary = false self.p.set_role('replica') # I was the leader, other members are healthy self.ha.fetch_node_status = get_node_status() self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, self.p.name, 
'', None)) self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node') # I was the leader, I am the only healthy member with patch('patroni.ha.logger.info') as mock_info: self.ha.fetch_node_status = get_node_status(reachable=False) # inaccessible, in_recovery self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') self.assertEqual(mock_info.call_args_list[0][0], ('Member %s is %s', 'leader', 'not reachable')) self.assertEqual(mock_info.call_args_list[1][0], ('Member %s is %s', 'other', 'not reachable')) def test_manual_failover_process_no_leader_in_synchronous_mode(self): self.ha.is_synchronous_mode = true self.p.is_primary = false self.ha.fetch_node_status = get_node_status(nofailover=True) # other nodes are not healthy # manual failover when our name (postgresql0) isn't in the /sync key and the candidate node is not available self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'other', None), sync=('leader1', 'blabla')) self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node') # manual failover when the candidate node isn't available but our name is in the /sync key # while other sync node is nofailover with patch('patroni.ha.logger.warning') as mock_warning: self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'other', None), sync=('leader1', 'postgresql0')) self.p.sync_handler.current_state = Mock(return_value=(CaseInsensitiveSet(), CaseInsensitiveSet())) self.ha.dcs.write_sync_state = Mock(return_value=SyncState.empty()) self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') self.assertEqual(mock_warning.call_args_list[0][0], ('%s: member %s is %s', 'manual failover', 'other', 'not allowed to promote')) # manual failover to our node (postgresql0), # which name is not in sync nodes list (some sync nodes are available) 
self.p.set_role('replica') self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'postgresql0', None), sync=('leader1', 'other')) self.p.sync_handler.current_state = Mock(return_value=(CaseInsensitiveSet(['leader1']), CaseInsensitiveSet(['leader1']))) self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') def test_manual_switchover_process_no_leader_in_synchronous_mode(self): self.ha.is_synchronous_mode = true self.p.is_primary = false # to a specific node, which name doesn't match our name (postgresql0) self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, 'leader', 'other', None)) self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node') # to our node (postgresql0), which name is not in sync nodes list self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, 'leader', 'postgresql0', None), sync=('leader1', 'blabla')) self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node') # without candidate, our name (postgresql0) is not in the sync nodes list self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, 'leader', '', None), sync=('leader', 'blabla')) self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node') # switchover from a specific leader, but the only sync node (us, postgresql0) has nofailover tag self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, 'leader', '', None), sync=('postgresql0')) self.ha.patroni.nofailover = True self.assertEqual(self.ha.run_cycle(), 'following a different leader because I am not allowed to promote') def test_manual_failover_process_no_leader_in_pause(self): self.ha.is_paused = true # I am running as primary, cluster is unlocked, the candidate is allowed to promote # but we are in pause self.ha.cluster = 
get_cluster_initialized_without_leader(failover=Failover(0, '', 'other', None)) self.assertEqual(self.ha.run_cycle(), 'PAUSE: continue to run as primary without lock') def test_manual_switchover_process_no_leader_in_pause(self): self.ha.is_paused = true # I am running as primary, cluster is unlocked, no candidate specified self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, 'leader', '', None)) self.assertEqual(self.ha.run_cycle(), 'PAUSE: continue to run as primary without lock') # the candidate is not running with patch('patroni.ha.logger.warning') as mock_warning: self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, 'leader', 'blabla', None)) self.assertEqual('PAUSE: acquired session lock as a leader', self.ha.run_cycle()) self.assertEqual( mock_warning.call_args_list[0][0], ('%s: removing failover key because failover candidate is not running', 'switchover')) # switchover to me, I am not leader self.p.is_primary = false self.p.set_role('replica') self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, 'leader', self.p.name, None)) self.assertEqual(self.ha.run_cycle(), 'PAUSE: promoted self to leader by acquiring session lock') def test_is_healthiest_node(self): self.ha.is_failsafe_mode = true self.ha.state_handler.is_primary = false self.ha.patroni.nofailover = False self.ha.fetch_node_status = get_node_status() self.ha.dcs._last_failsafe = {'foo': ''} self.assertFalse(self.ha.is_healthiest_node()) self.ha.dcs._last_failsafe = {'postgresql0': ''} self.assertTrue(self.ha.is_healthiest_node()) self.ha.dcs._last_failsafe = None with patch.object(Watchdog, 'is_healthy', PropertyMock(return_value=False)): self.assertFalse(self.ha.is_healthiest_node()) self.ha.is_paused = true self.assertFalse(self.ha.is_healthiest_node()) def test__is_healthiest_node(self): self.p.is_primary = false self.ha.cluster = get_cluster_initialized_without_leader(sync=('postgresql1', self.p.name)) self.ha.global_config 
= self.ha.patroni.config.get_global_config(self.ha.cluster) self.assertTrue(self.ha._is_healthiest_node(self.ha.old_cluster.members)) self.ha.fetch_node_status = get_node_status() # accessible, in_recovery self.assertTrue(self.ha._is_healthiest_node(self.ha.old_cluster.members)) self.ha.fetch_node_status = get_node_status(in_recovery=False) # accessible, not in_recovery self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members)) self.ha.fetch_node_status = get_node_status(failover_priority=2) # accessible, in_recovery, higher priority self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members)) # if there is a higher-priority node but it has a lower WAL position then this node should race self.ha.fetch_node_status = get_node_status(failover_priority=6, wal_position=9) self.assertTrue(self.ha._is_healthiest_node(self.ha.old_cluster.members)) self.ha.fetch_node_status = get_node_status(wal_position=11) # accessible, in_recovery, wal position ahead self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members)) # in synchronous_mode consider itself healthy if the former leader is accessible in read-only and ahead of us with patch.object(Ha, 'is_synchronous_mode', Mock(return_value=True)): self.assertTrue(self.ha._is_healthiest_node(self.ha.old_cluster.members)) self.ha.cluster.config.data.update({'maximum_lag_on_failover': 5}) self.ha.global_config = self.ha.patroni.config.get_global_config(self.ha.cluster) with patch('patroni.postgresql.Postgresql.last_operation', return_value=1): self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members)) with patch('patroni.postgresql.Postgresql.replica_cached_timeline', return_value=None): self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members)) with patch('patroni.postgresql.Postgresql.replica_cached_timeline', return_value=1): self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members)) self.ha.patroni.nofailover = True 
self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members)) self.ha.patroni.nofailover = None self.ha.patroni.failover_priority = 0 self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members)) def test_fetch_node_status(self): member = Member(0, 'test', 1, {'api_url': 'http://127.0.0.1:8011/patroni'}) self.ha.fetch_node_status(member) member = Member(0, 'test', 1, {'api_url': 'http://localhost:8011/patroni'}) self.ha.patroni.request = Mock() self.ha.patroni.request.return_value.data = b'{"wal":{"location":1},"role":"primary"}' ret = self.ha.fetch_node_status(member) self.assertFalse(ret.in_recovery) @patch.object(Rewind, 'pg_rewind', true) @patch.object(Rewind, 'check_leader_is_not_in_recovery', true) @patch('os.listdir', Mock(return_value=[])) @patch('patroni.postgresql.rewind.fsync_dir', Mock()) def test_post_recover(self): self.p.is_running = false self.ha.has_lock = true self.p.set_role('primary') self.assertEqual(self.ha.post_recover(), 'removed leader key after trying and failing to start postgres') self.ha.has_lock = false self.assertEqual(self.ha.post_recover(), 'failed to start postgres') leader = Leader(0, 0, Member(0, 'l', 2, {"version": "1.6", "conn_url": "postgres://a", "role": "primary"})) self.ha._rewind.execute(leader) self.p.is_running = true self.assertIsNone(self.ha.post_recover()) def test_schedule_future_restart(self): self.ha.patroni.scheduled_restart = {} # do the restart 2 times. 
The first one should succeed, the second one should fail self.assertTrue(self.ha.schedule_future_restart({'schedule': future_restart_time})) self.assertFalse(self.ha.schedule_future_restart({'schedule': future_restart_time})) def test_delete_future_restarts(self): self.ha.delete_future_restart() def test_evaluate_scheduled_restart(self): self.p.postmaster_start_time = Mock(return_value=str(postmaster_start_time)) # restart already in progress with patch('patroni.async_executor.AsyncExecutor.busy', PropertyMock(return_value=True)): self.assertIsNone(self.ha.evaluate_scheduled_restart()) # restart while the postmaster has been already restarted, fails with patch.object(self.ha, 'future_restart_scheduled', Mock(return_value={'postmaster_start_time': str(postmaster_start_time - datetime.timedelta(days=1)), 'schedule': str(future_restart_time)})): self.assertIsNone(self.ha.evaluate_scheduled_restart()) with patch.object(self.ha, 'future_restart_scheduled', Mock(return_value={'postmaster_start_time': str(postmaster_start_time), 'schedule': str(future_restart_time)})): with patch.object(self.ha, 'should_run_scheduled_action', Mock(return_value=True)): # restart in the future, ok self.assertIsNotNone(self.ha.evaluate_scheduled_restart()) with patch.object(self.ha, 'restart', Mock(return_value=(False, "Test"))): # restart in the future, bit the actual restart failed self.assertIsNone(self.ha.evaluate_scheduled_restart()) def test_scheduled_restart(self): self.ha.cluster = get_cluster_initialized_with_leader() with patch.object(self.ha, "evaluate_scheduled_restart", Mock(return_value="restart scheduled")): self.assertEqual(self.ha.run_cycle(), "restart scheduled") def test_restart_matches(self): self.p._role = 'replica' self.p._connection.server_version = 90500 self.p._pending_restart = True self.assertFalse(self.ha.restart_matches("primary", "9.5.0", True)) self.assertFalse(self.ha.restart_matches("replica", "9.4.3", True)) self.p._pending_restart = False 
self.assertFalse(self.ha.restart_matches("replica", "9.5.2", True)) self.assertTrue(self.ha.restart_matches("replica", "9.5.2", False)) def test_process_healthy_cluster_in_pause(self): self.p.is_primary = false self.ha.is_paused = true self.p.name = 'leader' self.ha.cluster = get_cluster_initialized_with_leader() self.assertEqual(self.ha.run_cycle(), 'PAUSE: removed leader lock because postgres is not running as primary') self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, '', self.p.name, None)) self.assertEqual(self.ha.run_cycle(), 'PAUSE: waiting to become primary after promote...') @patch('patroni.postgresql.mtime', Mock(return_value=1588316884)) @patch('builtins.open', mock_open(read_data='1\t0/40159C0\tno recovery target specified\n')) def test_process_healthy_standby_cluster_as_standby_leader(self): self.p.is_primary = false self.p.name = 'leader' self.ha.cluster = get_standby_cluster_initialized_with_only_leader() self.p.config.check_recovery_conf = Mock(return_value=(False, False)) self.ha._leader_timeline = 1 self.assertEqual(self.ha.run_cycle(), 'promoted self to a standby leader because i had the session lock') self.assertEqual(self.ha.run_cycle(), 'no action. I am (leader), the standby leader with the lock') self.p.set_role('replica') self.p.config.check_recovery_conf = Mock(return_value=(True, False)) self.assertEqual(self.ha.run_cycle(), 'promoted self to a standby leader because i had the session lock') def test_process_healthy_standby_cluster_as_cascade_replica(self): self.p.is_primary = false self.p.name = 'replica' self.ha.cluster = get_standby_cluster_initialized_with_only_leader() self.assertEqual(self.ha.run_cycle(), 'no action. 
I am (replica), a secondary, and following a standby leader (leader)') with patch.object(Leader, 'conn_url', PropertyMock(return_value='')): self.assertEqual(self.ha.run_cycle(), 'continue following the old known standby leader') @patch.object(Cluster, 'is_unlocked', Mock(return_value=True)) def test_process_unhealthy_standby_cluster_as_standby_leader(self): self.p.is_primary = false self.p.name = 'leader' self.ha.cluster = get_standby_cluster_initialized_with_only_leader() self.ha.sysid_valid = true self.p._sysid = True self.assertEqual(self.ha.run_cycle(), 'promoted self to a standby leader by acquiring session lock') @patch.object(Rewind, 'rewind_or_reinitialize_needed_and_possible', Mock(return_value=True)) @patch.object(Rewind, 'can_rewind', PropertyMock(return_value=True)) def test_process_unhealthy_standby_cluster_as_cascade_replica(self): self.p.is_primary = false self.p.name = 'replica' self.ha.cluster = get_standby_cluster_initialized_with_only_leader() self.assertTrue(self.ha.run_cycle().startswith('running pg_rewind from remote_member:')) def test_recover_unhealthy_leader_in_standby_cluster(self): self.p.is_primary = false self.p.name = 'leader' self.p.is_running = false self.p.follow = false self.ha.cluster = get_standby_cluster_initialized_with_only_leader() self.assertEqual(self.ha.run_cycle(), 'starting as a standby leader because i had the session lock') @patch.object(Cluster, 'is_unlocked', Mock(return_value=True)) def test_recover_unhealthy_unlocked_standby_cluster(self): self.p.is_primary = false self.p.name = 'leader' self.p.is_running = false self.p.follow = false self.ha.cluster = get_standby_cluster_initialized_with_only_leader() self.ha.has_lock = false self.assertEqual(self.ha.run_cycle(), 'trying to follow a remote member because standby cluster is unhealthy') def test_failed_to_update_lock_in_pause(self): self.ha.update_lock = false self.ha.is_paused = true self.p.name = 'leader' self.ha.cluster = get_cluster_initialized_with_leader() 
self.assertEqual(self.ha.run_cycle(), 'PAUSE: continue to run as primary after failing to update leader lock in DCS') def test_postgres_unhealthy_in_pause(self): self.ha.is_paused = true self.p.is_healthy = false self.assertEqual(self.ha.run_cycle(), 'PAUSE: postgres is not running') self.ha.has_lock = true self.assertEqual(self.ha.run_cycle(), 'PAUSE: removed leader lock because postgres is not running') def test_no_etcd_connection_in_pause(self): self.ha.is_paused = true self.ha.load_cluster_from_dcs = Mock(side_effect=DCSError('Etcd is not responding properly')) self.assertEqual(self.ha.run_cycle(), 'PAUSE: DCS is not accessible') @patch('patroni.ha.Ha.update_lock', return_value=True) @patch('patroni.ha.Ha.demote') def test_starting_timeout(self, demote, update_lock): def check_calls(seq): for mock, called in seq: if called: mock.assert_called_once() else: mock.assert_not_called() mock.reset_mock() self.ha.has_lock = true self.ha.cluster = get_cluster_initialized_with_leader() self.p.check_for_startup = true self.p.time_in_state = lambda: 30 self.assertEqual(self.ha.run_cycle(), 'PostgreSQL is still starting up, 270 seconds until timeout') check_calls([(update_lock, True), (demote, False)]) self.p.time_in_state = lambda: 350 self.ha.fetch_node_status = get_node_status(reachable=False) # inaccessible, in_recovery self.assertEqual(self.ha.run_cycle(), 'primary start has timed out, but continuing to wait because failover is not possible') check_calls([(update_lock, True), (demote, False)]) self.ha.fetch_node_status = get_node_status() # accessible, in_recovery self.assertEqual(self.ha.run_cycle(), 'stopped PostgreSQL because of startup timeout') check_calls([(update_lock, True), (demote, True)]) update_lock.return_value = False self.assertEqual(self.ha.run_cycle(), 'stopped PostgreSQL while starting up because leader key was lost') check_calls([(update_lock, True), (demote, True)]) self.ha.has_lock = false self.p.is_primary = false 
self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), a secondary, and following a leader (leader)') check_calls([(update_lock, False), (demote, False)]) def test_manual_failover_while_starting(self): self.ha.has_lock = true self.p.check_for_startup = true f = Failover(0, self.p.name, '', None) self.ha.cluster = get_cluster_initialized_with_leader(f) self.ha.fetch_node_status = get_node_status() # accessible, in_recovery self.assertEqual(self.ha.run_cycle(), 'switchover: demoting myself') @patch('patroni.ha.Ha.demote') def test_failover_immediately_on_zero_primary_start_timeout(self, demote): self.p.is_running = false self.ha.cluster = get_cluster_initialized_with_leader(sync=(self.p.name, 'other')) self.ha.cluster.config.data.update({'synchronous_mode': True, 'primary_start_timeout': 0}) self.ha.global_config = self.ha.patroni.config.get_global_config(self.ha.cluster) self.ha.has_lock = true self.ha.update_lock = true self.ha.fetch_node_status = get_node_status() # accessible, in_recovery self.assertEqual(self.ha.run_cycle(), 'stopped PostgreSQL to fail over after a crash') demote.assert_called_once() def test_primary_stop_timeout(self): self.assertEqual(self.ha.primary_stop_timeout(), None) self.ha.cluster.config.data.update({'primary_stop_timeout': 30}) self.ha.global_config = self.ha.patroni.config.get_global_config(self.ha.cluster) with patch.object(Ha, 'is_synchronous_mode', Mock(return_value=True)): self.assertEqual(self.ha.primary_stop_timeout(), 30) with patch.object(Ha, 'is_synchronous_mode', Mock(return_value=False)): self.assertEqual(self.ha.primary_stop_timeout(), None) self.ha.cluster.config.data['primary_stop_timeout'] = None self.ha.global_config = self.ha.patroni.config.get_global_config(self.ha.cluster) self.assertEqual(self.ha.primary_stop_timeout(), None) @patch('patroni.postgresql.Postgresql.follow') def test_demote_immediate(self, follow): self.ha.has_lock = true self.e.get_cluster = 
Mock(return_value=get_cluster_initialized_without_leader()) self.ha.demote('immediate') follow.assert_called_once_with(None) def test_process_sync_replication(self): self.ha.has_lock = true mock_set_sync = self.p.sync_handler.set_synchronous_standby_names = Mock() self.p.name = 'leader' # Test sync key removed when sync mode disabled self.ha.cluster = get_cluster_initialized_with_leader(sync=('leader', 'other')) with patch.object(self.ha.dcs, 'delete_sync_state') as mock_delete_sync: self.ha.run_cycle() mock_delete_sync.assert_called_once() mock_set_sync.assert_called_once_with(CaseInsensitiveSet()) mock_set_sync.reset_mock() # Test sync key not touched when not there self.ha.cluster = get_cluster_initialized_with_leader() with patch.object(self.ha.dcs, 'delete_sync_state') as mock_delete_sync: self.ha.run_cycle() mock_delete_sync.assert_not_called() mock_set_sync.assert_called_once_with(CaseInsensitiveSet()) mock_set_sync.reset_mock() self.ha.is_synchronous_mode = true # Test sync standby not touched when picking the same node self.p.sync_handler.current_state = Mock(return_value=(CaseInsensitiveSet(['other']), CaseInsensitiveSet(['other']))) self.ha.cluster = get_cluster_initialized_with_leader(sync=('leader', 'other')) self.ha.run_cycle() mock_set_sync.assert_not_called() mock_set_sync.reset_mock() # Test sync standby is replaced when switching standbys self.p.sync_handler.current_state = Mock(return_value=(CaseInsensitiveSet(['other2']), CaseInsensitiveSet())) self.ha.dcs.write_sync_state = Mock(return_value=SyncState.empty()) self.ha.run_cycle() mock_set_sync.assert_called_once_with(CaseInsensitiveSet(['other2'])) # Test sync standby is replaced when new standby is joined self.p.sync_handler.current_state = Mock(return_value=(CaseInsensitiveSet(['other2', 'other3']), CaseInsensitiveSet(['other2']))) self.ha.dcs.write_sync_state = Mock(return_value=SyncState.empty()) self.ha.run_cycle() self.assertEqual(mock_set_sync.call_args_list[0][0], 
(CaseInsensitiveSet(['other2']),)) self.assertEqual(mock_set_sync.call_args_list[1][0], (CaseInsensitiveSet(['other2', 'other3']),)) mock_set_sync.reset_mock() # Test sync standby is not disabled when updating dcs fails self.ha.dcs.write_sync_state = Mock(return_value=None) self.ha.run_cycle() mock_set_sync.assert_not_called() mock_set_sync.reset_mock() # Test changing sync standby self.ha.dcs.write_sync_state = Mock(return_value=SyncState.empty()) self.ha.dcs.get_cluster = Mock(return_value=get_cluster_initialized_with_leader(sync=('leader', 'other'))) # self.ha.cluster = get_cluster_initialized_with_leader(sync=('leader', 'other')) self.p.sync_handler.current_state = Mock(return_value=(CaseInsensitiveSet(['other2']), CaseInsensitiveSet(['other2']))) self.ha.run_cycle() self.assertEqual(self.ha.dcs.write_sync_state.call_count, 2) # Test updating sync standby key failed due to race self.ha.dcs.write_sync_state = Mock(side_effect=[SyncState.empty(), None]) self.ha.run_cycle() self.assertEqual(self.ha.dcs.write_sync_state.call_count, 2) # Test updating sync standby key failed due to DCS being not accessible self.ha.dcs.write_sync_state = Mock(return_value=SyncState.empty()) self.ha.dcs.get_cluster = Mock(side_effect=DCSError('foo')) self.ha.run_cycle() # Test changing sync standby failed due to race self.ha.dcs.write_sync_state = Mock(return_value=SyncState.empty()) self.ha.dcs.get_cluster = Mock(return_value=get_cluster_initialized_with_leader(sync=('somebodyelse', None))) self.ha.run_cycle() self.assertEqual(self.ha.dcs.write_sync_state.call_count, 2) # Test sync set to '*' when synchronous_mode_strict is enabled mock_set_sync.reset_mock() self.p.sync_handler.current_state = Mock(return_value=(CaseInsensitiveSet(), CaseInsensitiveSet())) with patch('patroni.config.GlobalConfig.is_synchronous_mode_strict', PropertyMock(return_value=True)): self.ha.run_cycle() mock_set_sync.assert_called_once_with(CaseInsensitiveSet('*')) def 
test_sync_replication_become_primary(self): self.ha.is_synchronous_mode = true mock_set_sync = self.p.sync_handler.set_synchronous_standby_names = Mock() self.p.is_primary = false self.p.set_role('replica') self.ha.has_lock = true mock_write_sync = self.ha.dcs.write_sync_state = Mock(return_value=SyncState.empty()) self.p.name = 'leader' self.ha.cluster = get_cluster_initialized_with_leader(sync=('other', None)) # When we just became primary nobody is sync self.assertEqual(self.ha.enforce_primary_role('msg', 'promote msg'), 'promote msg') mock_set_sync.assert_called_once_with(CaseInsensitiveSet()) mock_write_sync.assert_called_once_with('leader', None, version=0) mock_set_sync.reset_mock() # When we just became primary nobody is sync self.p.set_role('replica') mock_write_sync.return_value = False self.assertTrue(self.ha.enforce_primary_role('msg', 'promote msg') != 'promote msg') mock_set_sync.assert_not_called() def test_unhealthy_sync_mode(self): self.ha.is_synchronous_mode = true self.p.is_primary = false self.p.set_role('replica') self.p.name = 'other' self.ha.cluster = get_cluster_initialized_without_leader(sync=('leader', 'other2')) mock_write_sync = self.ha.dcs.write_sync_state = Mock(return_value=SyncState.empty()) mock_acquire = self.ha.acquire_lock = Mock(return_value=True) mock_follow = self.p.follow = Mock() mock_promote = self.p.promote = Mock() # If we don't match the sync replica we are not allowed to acquire lock self.ha.run_cycle() mock_acquire.assert_not_called() mock_follow.assert_called_once() self.assertEqual(mock_follow.call_args[0][0], None) mock_write_sync.assert_not_called() mock_follow.reset_mock() # If we do match we will try to promote self.ha._is_healthiest_node = true self.ha.cluster = get_cluster_initialized_without_leader(sync=('leader', 'other')) self.ha.run_cycle() mock_acquire.assert_called_once() mock_follow.assert_not_called() mock_promote.assert_called_once() mock_write_sync.assert_called_once_with('other', None, version=0) def 
test_disable_sync_when_restarting(self): self.ha.is_synchronous_mode = true self.p.name = 'other' self.p.is_primary = false self.p.set_role('replica') mock_restart = self.p.restart = Mock(return_value=True) self.ha.cluster = get_cluster_initialized_with_leader(sync=('leader', 'other')) self.ha.touch_member = Mock(return_value=True) self.ha.dcs.get_cluster = Mock(side_effect=[ get_cluster_initialized_with_leader(sync=('leader', syncstandby)) for syncstandby in ['other', None]]) with patch('time.sleep') as mock_sleep: self.ha.restart({}) mock_restart.assert_called_once() mock_sleep.assert_called() # Restart is still called when DCS connection fails mock_restart.reset_mock() self.ha.dcs.get_cluster = Mock(side_effect=DCSError("foo")) self.ha.restart({}) mock_restart.assert_called_once() # We don't try to fetch the cluster state when touch_member fails mock_restart.reset_mock() self.ha.dcs.get_cluster.reset_mock() self.ha.touch_member = Mock(return_value=False) self.ha.restart({}) mock_restart.assert_called_once() self.ha.dcs.get_cluster.assert_not_called() @patch.object(Cluster, 'is_unlocked', Mock(return_value=False)) def test_enable_synchronous_mode(self): self.ha.is_synchronous_mode = true self.ha.has_lock = true self.p.name = 'leader' self.p.sync_handler.current_state = Mock(return_value=(CaseInsensitiveSet(), CaseInsensitiveSet())) self.ha.dcs.write_sync_state = Mock(return_value=SyncState.empty()) with patch('patroni.ha.logger.info') as mock_logger: self.ha.run_cycle() self.assertEqual(mock_logger.call_args_list[0][0][0], 'Enabled synchronous replication') self.ha.dcs.write_sync_state = Mock(return_value=None) with patch('patroni.ha.logger.warning') as mock_logger: self.ha.run_cycle() self.assertEqual(mock_logger.call_args[0][0], 'Updating sync state failed') @patch.object(Cluster, 'is_unlocked', Mock(return_value=False)) def test_inconsistent_synchronous_state(self): self.ha.is_synchronous_mode = true self.ha.has_lock = true self.p.name = 'leader' 
self.ha.cluster = get_cluster_initialized_without_leader(sync=('leader', 'a')) self.p.sync_handler.current_state = Mock(return_value=(CaseInsensitiveSet('a'), CaseInsensitiveSet())) self.ha.dcs.write_sync_state = Mock(return_value=SyncState.empty()) mock_set_sync = self.p.sync_handler.set_synchronous_standby_names = Mock() with patch('patroni.ha.logger.warning') as mock_logger: self.ha.run_cycle() mock_set_sync.assert_called_once() self.assertTrue(mock_logger.call_args_list[0][0][0].startswith('Inconsistent state between ')) self.ha.dcs.write_sync_state = Mock(return_value=None) with patch('patroni.ha.logger.warning') as mock_logger: self.ha.run_cycle() self.assertEqual(mock_logger.call_args[0][0], 'Updating sync state failed') def test_effective_tags(self): self.ha._disable_sync = True self.assertEqual(self.ha.get_effective_tags(), {'foo': 'bar', 'nosync': True}) self.ha._disable_sync = False self.assertEqual(self.ha.get_effective_tags(), {'foo': 'bar'}) @patch('patroni.postgresql.mtime', Mock(return_value=1588316884)) @patch('builtins.open', Mock(side_effect=Exception)) @patch.object(Cluster, 'is_unlocked', Mock(return_value=False)) def test_restore_cluster_config(self): self.ha.cluster.config.data.clear() self.ha.has_lock = true self.assertEqual(self.ha.run_cycle(), 'no action. 
I am (postgresql0), the leader with the lock') def test_watch(self): self.ha.cluster = get_cluster_initialized_with_leader() self.ha.watch(0) def test_wakup(self): self.ha.wakeup() def test_shutdown(self): self.p.is_running = false self.ha.is_leader = true def stop(*args, **kwargs): kwargs['on_shutdown'](123, 120) self.p.stop = stop self.ha.shutdown() self.ha.is_failover_possible = true self.ha.shutdown() @patch('patroni.postgresql.citus.CitusHandler.is_coordinator', Mock(return_value=False)) def test_shutdown_citus_worker(self): self.ha.is_leader = true self.p.is_running = Mock(side_effect=[Mock(), False]) self.ha.patroni.request = Mock() self.ha.shutdown() self.ha.patroni.request.assert_called() self.assertEqual(self.ha.patroni.request.call_args[0][2], 'citus') self.assertEqual(self.ha.patroni.request.call_args[0][3]['type'], 'before_demote') @patch('time.sleep', Mock()) def test_leader_with_not_accessible_data_directory(self): self.ha.cluster = get_cluster_initialized_with_leader() self.ha.has_lock = true self.p.data_directory_empty = Mock(side_effect=OSError(5, "Input/output error: '{}'".format(self.p.data_dir))) self.assertEqual(self.ha.run_cycle(), 'released leader key voluntarily as data dir not accessible and currently leader') self.assertEqual(self.p.role, 'uninitialized') # as has_lock is mocked out, we need to fake the leader key release self.ha.has_lock = false # will not say bootstrap because data directory is not accessible self.assertEqual(self.ha.run_cycle(), "data directory is not accessible: [Errno 5] Input/output error: '{}'".format(self.p.data_dir)) @patch('patroni.postgresql.mtime', Mock(return_value=1588316884)) @patch('builtins.open', mock_open(read_data=('1\t0/40159C0\tno recovery target specified\n\n' '2\t1/40159C0\tno recovery target specified\n'))) @patch.object(Cluster, 'is_unlocked', Mock(return_value=False)) def test_update_cluster_history(self): self.ha.has_lock = true for tl in (1, 3): self.p.get_primary_timeline = 
Mock(return_value=tl) self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), the leader with the lock') @patch('sys.exit', return_value=1) def test_abort_join(self, exit_mock): self.ha.cluster = get_cluster_not_initialized_without_leader() self.p.is_primary = false self.ha.run_cycle() exit_mock.assert_called_once_with(1) self.p.set_role('replica') self.ha.dcs.initialize = Mock() with patch.object(Postgresql, 'cb_called', PropertyMock(return_value=True)): self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') self.ha.dcs.initialize.assert_not_called() @patch.object(Cluster, 'is_unlocked', Mock(return_value=False)) def test_after_pause(self): self.ha.has_lock = true self.ha.is_paused = true self.assertEqual(self.ha.run_cycle(), 'PAUSE: no action. I am (postgresql0), the leader with the lock') self.ha.is_paused = false self.assertEqual(self.ha.run_cycle(), 'no action. I am (postgresql0), the leader with the lock') @patch('patroni.psycopg.connect', psycopg_connect) def test_permanent_logical_slots_after_promote(self): self.p._major_version = 110000 config = ClusterConfig(1, {'slots': {'l': {'database': 'postgres', 'plugin': 'test_decoding'}}}, 1) self.p.name = 'other' self.ha.cluster = get_cluster_initialized_without_leader(cluster_config=config) self.assertEqual(self.ha.run_cycle(), 'acquired session lock as a leader') self.ha.cluster = get_cluster_initialized_without_leader(leader=True, cluster_config=config) self.ha.has_lock = true self.assertEqual(self.ha.run_cycle(), 'no action. 
I am (other), the leader with the lock') @patch.object(Cluster, 'has_member', true) def test_run_cycle(self): self.ha.dcs.touch_member = Mock(side_effect=DCSError('foo')) self.assertEqual(self.ha.run_cycle(), 'Unexpected exception raised, please report it as a BUG') self.ha.dcs.touch_member = Mock(side_effect=PatroniFatalException('foo')) self.assertRaises(PatroniFatalException, self.ha.run_cycle) def test_empty_directory_in_pause(self): self.ha.is_paused = true self.p.data_directory_empty = true self.assertEqual(self.ha.run_cycle(), 'PAUSE: running with empty data directory') self.assertEqual(self.p.role, 'uninitialized') @patch('patroni.ha.Ha.sysid_valid', MagicMock(return_value=True)) def test_sysid_no_match_in_pause(self): self.ha.is_paused = true self.p.controldata = lambda: {'Database cluster state': 'in recovery', 'Database system identifier': '123'} self.assertEqual(self.ha.run_cycle(), 'PAUSE: continue to run as primary without lock') self.ha.has_lock = true self.assertEqual(self.ha.run_cycle(), 'PAUSE: released leader key voluntarily due to the system ID mismatch') @patch('patroni.psycopg.connect', psycopg_connect) @patch('os.path.exists', Mock(return_value=True)) @patch('shutil.rmtree', Mock()) @patch('os.makedirs', Mock()) @patch('os.open', Mock()) @patch('os.fsync', Mock()) @patch('os.close', Mock()) @patch('os.chmod', Mock()) @patch('os.rename', Mock()) @patch('patroni.postgresql.Postgresql.is_starting', Mock(return_value=False)) @patch('builtins.open', mock_open()) @patch.object(ConfigHandler, 'check_recovery_conf', Mock(return_value=(False, False))) @patch.object(Postgresql, 'major_version', PropertyMock(return_value=130000)) @patch.object(SlotsHandler, 'sync_replication_slots', Mock(return_value=['ls'])) def test_follow_copy(self): self.ha.cluster.config.data['slots'] = {'ls': {'database': 'a', 'plugin': 'b'}} self.p.is_primary = false self.assertTrue(self.ha.run_cycle().startswith('Copying logical slots')) def test_acquire_lock(self): 
self.ha.dcs.attempt_to_acquire_leader = Mock(side_effect=[DCSError('foo'), Exception]) self.assertRaises(DCSError, self.ha.acquire_lock) self.assertFalse(self.ha.acquire_lock()) @patch('patroni.postgresql.citus.CitusHandler.is_coordinator', Mock(return_value=False)) def test_notify_citus_coordinator(self): self.ha.patroni.request = Mock() self.ha.notify_citus_coordinator('before_demote') self.ha.patroni.request.assert_called_once() self.assertEqual(self.ha.patroni.request.call_args[1]['timeout'], 30) self.ha.patroni.request = Mock(side_effect=Exception) with patch('patroni.ha.logger.warning') as mock_logger: self.ha.notify_citus_coordinator('before_promote') self.assertEqual(self.ha.patroni.request.call_args[1]['timeout'], 2) mock_logger.assert_called() self.assertTrue(mock_logger.call_args[0][0].startswith('Request to Citus coordinator')) patroni-3.2.2/tests/test_kubernetes.py000066400000000000000000000647161455170150700201370ustar00rootroot00000000000000import base64 import datetime import json import mock import socket import time import unittest import urllib3 from mock import Mock, PropertyMock, mock_open, patch from patroni.dcs.kubernetes import Cluster, k8s_client, k8s_config, K8sConfig, K8sConnectionFailed, \ K8sException, K8sObject, Kubernetes, KubernetesError, KubernetesRetriableException, \ Retry, RetryFailedError, SERVICE_HOST_ENV_NAME, SERVICE_PORT_ENV_NAME from threading import Thread from . 
import MockResponse, SleepException def mock_list_namespaced_config_map(*args, **kwargs): metadata = {'resource_version': '1', 'labels': {'f': 'b'}, 'name': 'test-config', 'annotations': {'initialize': '123', 'config': '{}'}} items = [k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata))] metadata.update({'name': 'test-leader', 'annotations': {'optime': '1234x', 'leader': 'p-0', 'ttl': '30s', 'slots': '{', 'failsafe': '{'}}) items.append(k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata))) metadata.update({'name': 'test-failover', 'annotations': {'leader': 'p-0'}}) items.append(k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata))) metadata.update({'name': 'test-sync', 'annotations': {'leader': 'p-0'}}) items.append(k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata))) metadata.update({'name': 'test-0-leader', 'labels': {Kubernetes._CITUS_LABEL: '0'}, 'annotations': {'optime': '1234x', 'leader': 'p-0', 'ttl': '30s', 'slots': '{', 'failsafe': '{'}}) items.append(k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata))) metadata.update({'name': 'test-0-config', 'labels': {Kubernetes._CITUS_LABEL: '0'}, 'annotations': {'initialize': '123', 'config': '{}'}}) items.append(k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata))) metadata.update({'name': 'test-1-leader', 'labels': {Kubernetes._CITUS_LABEL: '1'}, 'annotations': {'leader': 'p-3', 'ttl': '30s'}}) items.append(k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata))) metadata.update({'name': 'test-2-config', 'labels': {Kubernetes._CITUS_LABEL: '2'}, 'annotations': {}}) items.append(k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata))) metadata = k8s_client.V1ObjectMeta(resource_version='1') return k8s_client.V1ConfigMapList(metadata=metadata, items=items, kind='ConfigMapList') def mock_read_namespaced_endpoints(*args, **kwargs): target_ref = k8s_client.V1ObjectReference(kind='Pod', 
resource_version='10', name='p-0', namespace='default', uid='964dfeae-e79b-4476-8a5a-1920b5c2a69d') address0 = k8s_client.V1EndpointAddress(ip='10.0.0.0', target_ref=target_ref) address1 = k8s_client.V1EndpointAddress(ip='10.0.0.1') port = k8s_client.V1EndpointPort(port=5432, name='postgresql', protocol='TCP') subset = k8s_client.V1EndpointSubset(addresses=[address1, address0], ports=[port]) metadata = k8s_client.V1ObjectMeta(resource_version='1', labels={'f': 'b'}, name='test', annotations={'optime': '1234', 'leader': 'p-0', 'ttl': '30s'}) return k8s_client.V1Endpoints(subsets=[subset], metadata=metadata) def mock_list_namespaced_endpoints(*args, **kwargs): return k8s_client.V1EndpointsList(metadata=k8s_client.V1ObjectMeta(resource_version='1'), items=[mock_read_namespaced_endpoints()], kind='V1EndpointsList') def mock_list_namespaced_pod(*args, **kwargs): metadata = k8s_client.V1ObjectMeta(resource_version='1', labels={'f': 'b', Kubernetes._CITUS_LABEL: '1'}, name='p-0', annotations={'status': '{}'}, uid='964dfeae-e79b-4476-8a5a-1920b5c2a69d') status = k8s_client.V1PodStatus(pod_ip='10.0.0.1') spec = k8s_client.V1PodSpec(hostname='p-0', node_name='kind-control-plane', containers=[]) items = [k8s_client.V1Pod(metadata=metadata, status=status, spec=spec)] return k8s_client.V1PodList(items=items, kind='PodList') def mock_namespaced_kind(*args, **kwargs): mock = Mock() mock.metadata.resource_version = '2' return mock def mock_load_k8s_config(self, *args, **kwargs): self._server = '' class TestK8sConfig(unittest.TestCase): def test_load_incluster_config(self): for env in ({}, {SERVICE_HOST_ENV_NAME: '', SERVICE_PORT_ENV_NAME: ''}): with patch('os.environ', env): self.assertRaises(k8s_config.ConfigException, k8s_config.load_incluster_config) with patch('os.environ', {SERVICE_HOST_ENV_NAME: 'a', SERVICE_PORT_ENV_NAME: '1'}), \ patch('os.path.isfile', Mock(side_effect=[False, True, True, False, True, True, True, True])), \ patch('builtins.open', Mock(side_effect=[ 
mock_open()(), mock_open(read_data='a')(), mock_open(read_data='a')(), mock_open()(), mock_open(read_data='a')(), mock_open(read_data='a')()])): for _ in range(0, 4): self.assertRaises(k8s_config.ConfigException, k8s_config.load_incluster_config) k8s_config.load_incluster_config() self.assertEqual(k8s_config.server, 'https://a:1') self.assertEqual(k8s_config.headers.get('authorization'), 'Bearer a') def test_refresh_token(self): with patch('os.environ', {SERVICE_HOST_ENV_NAME: 'a', SERVICE_PORT_ENV_NAME: '1'}), \ patch('os.path.isfile', Mock(side_effect=[True, True, False, True, True, True])), \ patch('builtins.open', Mock(side_effect=[ mock_open(read_data='cert')(), mock_open(read_data='a')(), mock_open()(), mock_open(read_data='b')(), mock_open(read_data='c')()])): k8s_config.load_incluster_config(token_refresh_interval=datetime.timedelta(milliseconds=100)) self.assertEqual(k8s_config.headers.get('authorization'), 'Bearer a') time.sleep(0.1) # token file doesn't exist self.assertEqual(k8s_config.headers.get('authorization'), 'Bearer a') # token file is empty self.assertEqual(k8s_config.headers.get('authorization'), 'Bearer a') # token refreshed self.assertEqual(k8s_config.headers.get('authorization'), 'Bearer b') time.sleep(0.1) # token refreshed self.assertEqual(k8s_config.headers.get('authorization'), 'Bearer c') # no need to refresh token self.assertEqual(k8s_config.headers.get('authorization'), 'Bearer c') def test_load_kube_config(self): config = { "current-context": "local", "contexts": [{"name": "local", "context": {"user": "local", "cluster": "local"}}], "clusters": [{"name": "local", "cluster": {"server": "https://a:1/", "certificate-authority": "a"}}], "users": [{"name": "local", "user": {"username": "a", "password": "b", "client-certificate": "c"}}] } with patch('builtins.open', mock_open(read_data=json.dumps(config))): k8s_config.load_kube_config() self.assertEqual(k8s_config.server, 'https://a:1') self.assertEqual(k8s_config.pool_config, {'ca_certs': 
'a', 'cert_file': 'c', 'cert_reqs': 'CERT_REQUIRED', 'maxsize': 10, 'num_pools': 10}) config["users"][0]["user"]["token"] = "token" with patch('builtins.open', mock_open(read_data=json.dumps(config))): k8s_config.load_kube_config() self.assertEqual(k8s_config.headers.get('authorization'), 'Bearer token') config["users"][0]["user"]["client-key-data"] = base64.b64encode(b'foobar').decode('utf-8') config["clusters"][0]["cluster"]["certificate-authority-data"] = base64.b64encode(b'foobar').decode('utf-8') with patch('builtins.open', mock_open(read_data=json.dumps(config))), \ patch('os.write', Mock()), patch('os.close', Mock()), \ patch('os.remove') as mock_remove, \ patch('atexit.register') as mock_atexit, \ patch('tempfile.mkstemp') as mock_mkstemp: mock_mkstemp.side_effect = [(3, '1.tmp'), (4, '2.tmp')] k8s_config.load_kube_config() mock_atexit.assert_called_once() mock_remove.side_effect = OSError mock_atexit.call_args[0][0]() # call _cleanup_temp_files mock_remove.assert_has_calls([mock.call('1.tmp'), mock.call('2.tmp')]) @patch('urllib3.PoolManager.request') class TestApiClient(unittest.TestCase): @patch.object(K8sConfig, '_server', '', create=True) @patch('urllib3.PoolManager.request', Mock()) def setUp(self): self.a = k8s_client.ApiClient(True) self.mock_get_ep = MockResponse() self.mock_get_ep.content = '{"subsets":[{"ports":[{"name":"https","protocol":"TCP","port":443}],' +\ '"addresses":[{"ip":"127.0.0.1"},{"ip":"127.0.0.2"}]}]}' def test__do_http_request(self, mock_request): mock_request.side_effect = [self.mock_get_ep] + [socket.timeout] self.assertRaises(K8sException, self.a.call_api, 'GET', 'f') @patch('time.sleep', Mock()) def test_request(self, mock_request): retry = Retry(deadline=10, max_delay=1, max_tries=1, retry_exceptions=KubernetesRetriableException) mock_request.side_effect = [self.mock_get_ep] + 3 * [socket.timeout] + [k8s_client.rest.ApiException(500, '')] self.assertRaises(k8s_client.rest.ApiException, retry, self.a.call_api, 'GET', 'f', 
_retry=retry) mock_request.side_effect = [self.mock_get_ep, socket.timeout, Mock(), self.mock_get_ep] self.assertRaises(k8s_client.rest.ApiException, retry, self.a.call_api, 'GET', 'f', _retry=retry) retry.deadline = 0.0001 mock_request.side_effect = [socket.timeout, socket.timeout, self.mock_get_ep] self.assertRaises(K8sConnectionFailed, retry, self.a.call_api, 'GET', 'f', _retry=retry) def test__refresh_api_servers_cache(self, mock_request): mock_request.side_effect = k8s_client.rest.ApiException(403, '') self.a.refresh_api_servers_cache() class TestCoreV1Api(unittest.TestCase): @patch('urllib3.PoolManager.request', Mock()) @patch.object(K8sConfig, '_server', '', create=True) def setUp(self): self.a = k8s_client.CoreV1Api() self.a._api_client.pool_manager.request = Mock(return_value=MockResponse()) def test_create_namespaced_service(self): self.assertEqual(str(self.a.create_namespaced_service('default', {}, _request_timeout=2)), '{}') def test_list_namespaced_endpoints(self): self.a._api_client.pool_manager.request.return_value.content = '{"items": [1,2,3]}' self.assertIsInstance(self.a.list_namespaced_endpoints('default'), K8sObject) def test_patch_namespaced_config_map(self): self.assertEqual(str(self.a.patch_namespaced_config_map('foo', 'default', {}, _request_timeout=(1, 2))), '{}') def test_list_namespaced_pod(self): self.a._api_client.pool_manager.request.return_value.status_code = 409 self.a._api_client.pool_manager.request.return_value.content = 'foo' try: self.a.list_namespaced_pod('default', label_selector='foo=bar') self.assertFail() except k8s_client.rest.ApiException as e: self.assertTrue('Reason: ' in str(e)) def test_delete_namespaced_pod(self): self.assertEqual(str(self.a.delete_namespaced_pod('foo', 'default', _request_timeout=(1, 2), body={})), '{}') class BaseTestKubernetes(unittest.TestCase): @patch('urllib3.PoolManager.request', Mock()) @patch('socket.TCP_KEEPIDLE', 4, create=True) @patch('socket.TCP_KEEPINTVL', 5, create=True) 
@patch('socket.TCP_KEEPCNT', 6, create=True) @patch.object(Thread, 'start', Mock()) @patch.object(K8sConfig, 'load_kube_config', mock_load_k8s_config) @patch.object(K8sConfig, 'load_incluster_config', Mock(side_effect=k8s_config.ConfigException)) @patch.object(k8s_client.CoreV1Api, 'list_namespaced_pod', mock_list_namespaced_pod, create=True) @patch.object(k8s_client.CoreV1Api, 'list_namespaced_config_map', mock_list_namespaced_config_map, create=True) def setUp(self, config=None): config = config or {} config.update(ttl=30, scope='test', name='p-0', loop_wait=10, group=0, retry_timeout=10, labels={'f': 'b'}, bypass_api_service=True) self.k = Kubernetes(config) self.k._citus_group = None self.assertRaises(AttributeError, self.k._pods._build_cache) self.k._pods._is_ready = True self.assertRaises(TypeError, self.k._kinds._build_cache) self.k._kinds._is_ready = True self.k.get_cluster() @patch.object(k8s_client.CoreV1Api, 'patch_namespaced_config_map', mock_namespaced_kind, create=True) class TestKubernetesConfigMaps(BaseTestKubernetes): @patch('time.time', Mock(side_effect=[1, 10.9, 100])) def test__wait_caches(self): self.k._pods._is_ready = False with self.k._condition: self.assertRaises(RetryFailedError, self.k._wait_caches, time.time() + 10) @patch('time.time', Mock(return_value=time.time() + 100)) def test_get_cluster(self): self.k.get_cluster() with patch.object(Kubernetes, '_wait_caches', Mock(side_effect=Exception)): self.assertRaises(KubernetesError, self.k.get_cluster) def test__get_citus_cluster(self): self.k._citus_group = '0' cluster = self.k.get_cluster() self.assertIsInstance(cluster, Cluster) self.assertIsInstance(cluster.workers[1], Cluster) @patch('patroni.dcs.kubernetes.logger.error') def test_get_citus_coordinator(self, mock_logger): self.assertIsInstance(self.k.get_citus_coordinator(), Cluster) with patch.object(Kubernetes, '_cluster_loader', Mock(side_effect=Exception)): self.assertIsNone(self.k.get_citus_coordinator()) 
mock_logger.assert_called() self.assertTrue(mock_logger.call_args[0][0].startswith('Failed to load Citus coordinator')) def test_attempt_to_acquire_leader(self): with patch.object(k8s_client.CoreV1Api, 'patch_namespaced_config_map', create=True) as mock_patch: mock_patch.side_effect = K8sException self.assertRaises(KubernetesError, self.k.attempt_to_acquire_leader) mock_patch.side_effect = k8s_client.rest.ApiException(409, '') self.assertFalse(self.k.attempt_to_acquire_leader()) def test_take_leader(self): self.k.take_leader() self.k._leader_observed_record['leader'] = 'test' self.k.patch_or_create = Mock(return_value=False) self.k.take_leader() def test_manual_failover(self): with patch.object(k8s_client.CoreV1Api, 'patch_namespaced_config_map', Mock(side_effect=RetryFailedError('')), create=True): self.k.manual_failover('foo', 'bar') def test_set_config_value(self): with patch.object(k8s_client.CoreV1Api, 'patch_namespaced_config_map', Mock(side_effect=k8s_client.rest.ApiException(409, '')), create=True): self.k.set_config_value('{}', 1) @patch.object(k8s_client.CoreV1Api, 'patch_namespaced_pod', create=True) def test_touch_member(self, mock_patch_namespaced_pod): mock_patch_namespaced_pod.return_value.metadata.resource_version = '10' self.k.touch_member({'role': 'replica'}) self.k._name = 'p-1' self.k.touch_member({'state': 'running', 'role': 'replica'}) self.k.touch_member({'state': 'stopped', 'role': 'primary'}) self.k._role_label = 'isMaster' self.k._leader_label_value = 'true' self.k._follower_label_value = 'false' self.k._standby_leader_label_value = 'false' self.k._tmp_role_label = 'tmp_role' self.k.touch_member({'state': 'running', 'role': 'replica'}) mock_patch_namespaced_pod.assert_called() self.assertEqual(mock_patch_namespaced_pod.call_args[0][2].metadata.labels['isMaster'], 'false') self.assertEqual(mock_patch_namespaced_pod.call_args[0][2].metadata.labels['tmp_role'], 'replica') mock_patch_namespaced_pod.rest_mock() self.k._name = 'p-0' 
self.k.touch_member({'role': 'standby_leader'}) mock_patch_namespaced_pod.assert_called() self.assertEqual(mock_patch_namespaced_pod.call_args[0][2].metadata.labels['isMaster'], 'false') self.assertEqual(mock_patch_namespaced_pod.call_args[0][2].metadata.labels['tmp_role'], 'master') mock_patch_namespaced_pod.rest_mock() self.k.touch_member({'role': 'primary'}) mock_patch_namespaced_pod.assert_called() self.assertEqual(mock_patch_namespaced_pod.call_args[0][2].metadata.labels['isMaster'], 'true') self.assertEqual(mock_patch_namespaced_pod.call_args[0][2].metadata.labels['tmp_role'], 'master') def test_initialize(self): self.k.initialize() def test_delete_leader(self): self.k.delete_leader(self.k.get_cluster().leader, 1) def test_cancel_initialization(self): self.k.cancel_initialization() @patch.object(k8s_client.CoreV1Api, 'delete_collection_namespaced_config_map', Mock(side_effect=k8s_client.rest.ApiException(403, '')), create=True) def test_delete_cluster(self): self.k.delete_cluster() def test_watch(self): self.k.set_ttl(10) self.k.watch(None, 0) self.k.watch('5', 0) def test_set_history_value(self): self.k.set_history_value('{}') @patch('patroni.dcs.kubernetes.logger.warning') def test_reload_config(self, mock_warning): self.k.reload_config({'loop_wait': 10, 'ttl': 30, 'retry_timeout': 10, 'retriable_http_codes': '401, 403 '}) self.assertEqual(self.k._api._retriable_http_codes, self.k._api._DEFAULT_RETRIABLE_HTTP_CODES | set([401, 403])) self.k.reload_config({'loop_wait': 10, 'ttl': 30, 'retry_timeout': 10, 'retriable_http_codes': 402}) self.assertEqual(self.k._api._retriable_http_codes, self.k._api._DEFAULT_RETRIABLE_HTTP_CODES | set([402])) self.k.reload_config({'loop_wait': 10, 'ttl': 30, 'retry_timeout': 10, 'retriable_http_codes': [405, 406]}) self.assertEqual(self.k._api._retriable_http_codes, self.k._api._DEFAULT_RETRIABLE_HTTP_CODES | set([405, 406])) self.k.reload_config({'loop_wait': 10, 'ttl': 30, 'retry_timeout': 10, 'retriable_http_codes': True}) 
mock_warning.assert_called_once() class TestKubernetesEndpointsNoPodIP(BaseTestKubernetes): @patch.object(k8s_client.CoreV1Api, 'list_namespaced_endpoints', mock_list_namespaced_endpoints, create=True) def setUp(self, config=None): super(TestKubernetesEndpointsNoPodIP, self).setUp({'use_endpoints': True}) @patch.object(k8s_client.CoreV1Api, 'patch_namespaced_endpoints', create=True) def test_update_leader(self, mock_patch_namespaced_endpoints): leader = self.k.get_cluster().leader self.assertIsNotNone(self.k.update_leader(leader, '123', failsafe={'foo': 'bar'})) args = mock_patch_namespaced_endpoints.call_args[0] self.assertEqual(args[2].subsets[0].addresses[0].target_ref.resource_version, '1') self.assertEqual(args[2].subsets[0].addresses[0].ip, '10.0.0.1') class TestKubernetesEndpoints(BaseTestKubernetes): @patch.object(k8s_client.CoreV1Api, 'list_namespaced_endpoints', mock_list_namespaced_endpoints, create=True) def setUp(self, config=None): super(TestKubernetesEndpoints, self).setUp({'use_endpoints': True, 'pod_ip': '10.0.0.0'}) @patch.object(k8s_client.CoreV1Api, 'patch_namespaced_endpoints', create=True) def test_update_leader(self, mock_patch_namespaced_endpoints): leader = self.k.get_cluster().leader self.assertIsNotNone(self.k.update_leader(leader, '123', failsafe={'foo': 'bar'})) args = mock_patch_namespaced_endpoints.call_args[0] self.assertEqual(args[2].subsets[0].addresses[0].target_ref.resource_version, '10') self.assertEqual(args[2].subsets[0].addresses[0].ip, '10.0.0.0') self.k._kinds._object_cache['test'].subsets[:] = [] self.assertIsNotNone(self.k.update_leader(leader, '123')) self.k._kinds._object_cache['test'].metadata.annotations['leader'] = 'p-1' self.assertFalse(self.k.update_leader(leader, '123')) @patch.object(k8s_client.CoreV1Api, 'read_namespaced_endpoints', create=True) @patch.object(k8s_client.CoreV1Api, 'patch_namespaced_endpoints', create=True) def test__update_leader_with_retry(self, mock_patch, mock_read): leader = 
self.k.get_cluster().leader mock_read.return_value = mock_read_namespaced_endpoints() mock_patch.side_effect = k8s_client.rest.ApiException(502, '') self.assertFalse(self.k.update_leader(leader, '123')) mock_patch.side_effect = RetryFailedError('') self.assertRaises(KubernetesError, self.k.update_leader, leader, '123') mock_patch.side_effect = k8s_client.rest.ApiException(409, '') with patch('time.time', Mock(side_effect=[0, 100, 200, 0, 0, 0, 0, 100, 200])): self.assertFalse(self.k.update_leader(leader, '123')) self.assertFalse(self.k.update_leader(leader, '123')) self.assertFalse(self.k.update_leader(leader, '123')) mock_patch.side_effect = [k8s_client.rest.ApiException(409, ''), mock_namespaced_kind()] mock_read.return_value.metadata.resource_version = '2' self.assertIsNotNone(self.k._update_leader_with_retry({}, '1', [])) mock_patch.side_effect = k8s_client.rest.ApiException(409, '') mock_read.side_effect = RetryFailedError('') self.assertRaises(KubernetesError, self.k.update_leader, leader, '123') mock_read.side_effect = Exception self.assertFalse(self.k.update_leader(leader, '123')) @patch.object(k8s_client.CoreV1Api, 'patch_namespaced_endpoints', Mock(side_effect=[k8s_client.rest.ApiException(500, ''), k8s_client.rest.ApiException(502, '')]), create=True) def test_delete_sync_state(self): self.assertFalse(self.k.delete_sync_state(1)) @patch.object(k8s_client.CoreV1Api, 'patch_namespaced_endpoints', mock_namespaced_kind, create=True) def test_write_sync_state(self): self.assertIsNotNone(self.k.write_sync_state('a', ['b'], 1)) @patch.object(k8s_client.CoreV1Api, 'patch_namespaced_pod', mock_namespaced_kind, create=True) @patch.object(k8s_client.CoreV1Api, 'create_namespaced_endpoints', mock_namespaced_kind, create=True) @patch.object(k8s_client.CoreV1Api, 'create_namespaced_service', Mock(side_effect=[True, False, k8s_client.rest.ApiException(409, ''), k8s_client.rest.ApiException(403, ''), k8s_client.rest.ApiException(500, ''), Exception("Unexpected") ]), 
create=True) @patch('patroni.dcs.kubernetes.logger.exception') def test__create_config_service(self, mock_logger_exception): self.assertIsNotNone(self.k.patch_or_create_config({'foo': 'bar'})) self.assertIsNotNone(self.k.patch_or_create_config({'foo': 'bar'})) self.k.patch_or_create_config({'foo': 'bar'}) mock_logger_exception.assert_not_called() self.k.patch_or_create_config({'foo': 'bar'}) mock_logger_exception.assert_not_called() self.k.patch_or_create_config({'foo': 'bar'}) mock_logger_exception.assert_called_once() self.assertEqual(('create_config_service failed',), mock_logger_exception.call_args[0]) mock_logger_exception.reset_mock() self.k.touch_member({'state': 'running', 'role': 'replica'}) mock_logger_exception.assert_called_once() self.assertEqual(('create_config_service failed',), mock_logger_exception.call_args[0]) @patch.object(k8s_client.CoreV1Api, 'patch_namespaced_endpoints', mock_namespaced_kind, create=True) def test_write_leader_optime(self): self.k.write_leader_optime(12345) def mock_watch(*args): return urllib3.HTTPResponse() class TestCacheBuilder(BaseTestKubernetes): @patch.object(k8s_client.CoreV1Api, 'list_namespaced_config_map', mock_list_namespaced_config_map, create=True) @patch('patroni.dcs.kubernetes.ObjectCache._watch', mock_watch) @patch.object(urllib3.HTTPResponse, 'read_chunked') def test__build_cache(self, mock_read_chunked): self.k._citus_group = '0' mock_read_chunked.return_value = [json.dumps( {'type': 'MODIFIED', 'object': {'metadata': { 'name': self.k.config_path, 'resourceVersion': '2', 'annotations': {self.k._CONFIG: 'foo'}}}} ).encode('utf-8'), ('\n' + json.dumps( {'type': 'DELETED', 'object': {'metadata': { 'name': self.k.config_path, 'resourceVersion': '3'}}} ) + '\n' + json.dumps( {'type': 'MDIFIED', 'object': {'metadata': {'name': self.k.config_path}}} ) + '\n').encode('utf-8'), b'{"object":{', b'"code":410}}\n'] self.k._kinds._build_cache() @patch('patroni.dcs.kubernetes.logger.error', 
Mock(side_effect=SleepException)) @patch('patroni.dcs.kubernetes.ObjectCache._build_cache', Mock(side_effect=Exception)) def test_run(self): self.assertRaises(SleepException, self.k._pods.run) @patch('time.sleep', Mock()) def test__list(self): self.k._pods._func = Mock(side_effect=Exception) self.assertRaises(Exception, self.k._pods._list) @patch('patroni.dcs.kubernetes.ObjectCache._watch', Mock(return_value=None)) def test__do_watch(self): self.assertRaises(AttributeError, self.k._kinds._do_watch, '1') @patch.object(k8s_client.CoreV1Api, 'list_namespaced_config_map', mock_list_namespaced_config_map, create=True) @patch('patroni.dcs.kubernetes.ObjectCache._watch', mock_watch) @patch.object(urllib3.HTTPResponse, 'read_chunked', Mock(return_value=[])) def test_kill_stream(self): self.k._kinds.kill_stream() with patch.object(urllib3.HTTPResponse, 'connection') as mock_connection: mock_connection.sock.close.side_effect = Exception self.k._kinds._do_watch('1') self.k._kinds.kill_stream() with patch.object(urllib3.HTTPResponse, 'connection', PropertyMock(side_effect=Exception)): self.k._kinds.kill_stream() patroni-3.2.2/tests/test_log.py000066400000000000000000000051601455170150700165350ustar00rootroot00000000000000import logging import os import sys import unittest import yaml from mock import Mock, patch from patroni.config import Config from patroni.log import PatroniLogger from queue import Queue, Full _LOG = logging.getLogger(__name__) class TestPatroniLogger(unittest.TestCase): def setUp(self): self._handlers = logging.getLogger().handlers[:] def tearDown(self): logging.getLogger().handlers[:] = self._handlers @patch('logging.FileHandler._open', Mock()) def test_patroni_logger(self): config = { 'log': { 'traceback_level': 'DEBUG', 'max_queue_size': 5, 'dir': 'foo', 'file_size': 4096, 'file_num': 5, 'loggers': { 'foo.bar': 'INFO' } }, 'restapi': {}, 'postgresql': {'data_dir': 'foo'} } sys.argv = ['patroni.py'] os.environ[Config.PATRONI_CONFIG_VARIABLE] = 
yaml.dump(config, default_flow_style=False) logger = PatroniLogger() patroni_config = Config(None) logger.reload_config(patroni_config['log']) _LOG.exception('test') logger.start() with patch.object(logging.Handler, 'format', Mock(side_effect=Exception)), \ patch('_pytest.logging.LogCaptureHandler.emit', Mock()): logging.error('test') self.assertEqual(logger.log_handler.maxBytes, config['log']['file_size']) self.assertEqual(logger.log_handler.backupCount, config['log']['file_num']) config['log']['level'] = 'DEBUG' config['log'].pop('dir') with patch('logging.Handler.close', Mock(side_effect=Exception)): logger.reload_config(config['log']) with patch.object(logging.Logger, 'makeRecord', Mock(side_effect=[logging.LogRecord('', logging.INFO, '', 0, '', (), None), Exception])): logging.exception('test') logging.error('test') with patch.object(Queue, 'put_nowait', Mock(side_effect=Full)): self.assertRaises(SystemExit, logger.shutdown) self.assertRaises(Exception, logger.shutdown) self.assertLessEqual(logger.queue_size, 2) # "Failed to close the old log handler" could be still in the queue self.assertEqual(logger.records_lost, 0) def test_interceptor(self): logger = PatroniLogger() logger.reload_config({'level': 'INFO'}) logger.start() _LOG.info('Lock owner: ') _LOG.info('blabla') logger.shutdown() self.assertEqual(logger.records_lost, 0) patroni-3.2.2/tests/test_patroni.py000066400000000000000000000306531455170150700174350ustar00rootroot00000000000000import etcd import logging import os import signal import time import unittest import patroni.config as config from http.server import HTTPServer from mock import Mock, PropertyMock, patch from patroni.api import RestApiServer from patroni.async_executor import AsyncExecutor from patroni.dcs import Cluster, Member from patroni.dcs.etcd import AbstractEtcdClientWithFailover from patroni.exceptions import DCSError from patroni.postgresql import Postgresql from patroni.postgresql.config import ConfigHandler from 
patroni.__main__ import check_psycopg, Patroni, main as _main from threading import Thread from . import psycopg_connect, SleepException from .test_etcd import etcd_read, etcd_write from .test_postgresql import MockPostmaster def mock_import(*args, **kwargs): ret = Mock() ret.__version__ = '2.5.3.dev1 a b c' if args[0] == 'psycopg2' else '3.1.0' return ret def mock_import2(*args, **kwargs): if args[0] == 'psycopg2': raise ImportError ret = Mock() ret.__version__ = '0.1.2' return ret class MockFrozenImporter(object): toc = set(['patroni.dcs.etcd']) @patch('time.sleep', Mock()) @patch('subprocess.call', Mock(return_value=0)) @patch('patroni.psycopg.connect', psycopg_connect) @patch('urllib3.PoolManager.request', Mock(side_effect=Exception)) @patch.object(ConfigHandler, 'append_pg_hba', Mock()) @patch.object(ConfigHandler, 'write_postgresql_conf', Mock()) @patch.object(ConfigHandler, 'write_recovery_conf', Mock()) @patch.object(Postgresql, 'is_running', Mock(return_value=MockPostmaster())) @patch.object(Postgresql, 'call_nowait', Mock()) @patch.object(HTTPServer, '__init__', Mock()) @patch.object(AsyncExecutor, 'run', Mock()) @patch.object(etcd.Client, 'write', etcd_write) @patch.object(etcd.Client, 'read', etcd_read) class TestPatroni(unittest.TestCase): @patch('sys.argv', ['patroni.py']) def test_no_config(self): self.assertRaises(SystemExit, _main) @patch('sys.argv', ['patroni.py', '--validate-config', 'postgres0.yml']) @patch('socket.socket.connect_ex', Mock(return_value=1)) def test_validate_config(self): self.assertRaises(SystemExit, _main) with patch.object(config.Config, '__init__', Mock(return_value=None)): self.assertRaises(SystemExit, _main) @patch('pkgutil.iter_importers', Mock(return_value=[MockFrozenImporter()])) @patch('urllib3.PoolManager.request', Mock(side_effect=Exception)) @patch('sys.frozen', Mock(return_value=True), create=True) @patch.object(HTTPServer, '__init__', Mock()) @patch.object(etcd.Client, 'read', etcd_read) @patch.object(Thread, 
'start', Mock()) @patch.object(AbstractEtcdClientWithFailover, '_get_machines_list', Mock(return_value=['http://remotehost:2379'])) @patch.object(Postgresql, '_get_gucs', Mock(return_value={'foo': True, 'bar': True})) def setUp(self): self._handlers = logging.getLogger().handlers[:] RestApiServer._BaseServer__is_shut_down = Mock() RestApiServer._BaseServer__shutdown_request = True RestApiServer.socket = 0 os.environ['PATRONI_POSTGRESQL_DATA_DIR'] = 'data/test0' conf = config.Config('postgres0.yml') self.p = Patroni(conf) def tearDown(self): logging.getLogger().handlers[:] = self._handlers @patch('patroni.dcs.AbstractDCS.get_cluster', Mock(side_effect=[None, DCSError('foo'), None])) def test_load_dynamic_configuration(self): self.p.config._dynamic_configuration = {} self.p.load_dynamic_configuration() self.p.load_dynamic_configuration() @patch('sys.argv', ['patroni.py', 'postgres0.yml']) @patch('time.sleep', Mock(side_effect=SleepException)) @patch.object(etcd.Client, 'delete', Mock()) @patch.object(AbstractEtcdClientWithFailover, '_get_machines_list', Mock(return_value=['http://remotehost:2379'])) @patch.object(Thread, 'join', Mock()) @patch.object(Postgresql, '_get_gucs', Mock(return_value={'foo': True, 'bar': True})) def test_patroni_patroni_main(self): with patch('subprocess.call', Mock(return_value=1)): with patch.object(Patroni, 'run', Mock(side_effect=SleepException)): os.environ['PATRONI_POSTGRESQL_DATA_DIR'] = 'data/test0' self.assertRaises(SleepException, _main) with patch.object(Patroni, 'run', Mock(side_effect=KeyboardInterrupt())): with patch('patroni.ha.Ha.is_paused', Mock(return_value=True)): os.environ['PATRONI_POSTGRESQL_DATA_DIR'] = 'data/test0' _main() @patch('os.getpid') @patch('multiprocessing.Process') @patch('patroni.__main__.patroni_main', Mock()) @patch('sys.argv', ['patroni.py', 'postgres0.yml']) def test_patroni_main(self, mock_process, mock_getpid): mock_getpid.return_value = 2 _main() mock_getpid.return_value = 1 def mock_signal(signo, 
handler): handler(signo, None) with patch('signal.signal', mock_signal): with patch('os.waitpid', Mock(side_effect=[(1, 0), (0, 0)])): _main() with patch('os.waitpid', Mock(side_effect=OSError)): _main() ref = {'passtochild': lambda signo, stack_frame: 0} def mock_sighup(signo, handler): if hasattr(signal, 'SIGHUP') and signo == signal.SIGHUP: ref['passtochild'] = handler def mock_join(): ref['passtochild'](0, None) mock_process.return_value.join = mock_join with patch('signal.signal', mock_sighup), patch('os.kill', Mock()): self.assertIsNone(_main()) @patch('patroni.config.Config.save_cache', Mock()) @patch('patroni.config.Config.reload_local_configuration', Mock(return_value=True)) @patch('patroni.ha.Ha.is_leader', Mock(return_value=True)) @patch.object(Postgresql, 'state', PropertyMock(return_value='running')) @patch.object(Postgresql, 'data_directory_empty', Mock(return_value=False)) def test_run(self): self.p.postgresql.set_role('replica') self.p.sighup_handler() self.p.ha.dcs.watch = Mock(side_effect=SleepException) self.p.api.start = Mock() self.p.logger.start = Mock() self.p.config._dynamic_configuration = {} with patch('patroni.dcs.Cluster.is_unlocked', Mock(return_value=True)): self.assertRaises(SleepException, self.p.run) with patch('patroni.config.Config.reload_local_configuration', Mock(return_value=False)): self.p.sighup_handler() self.assertRaises(SleepException, self.p.run) with patch('patroni.config.Config.set_dynamic_configuration', Mock(return_value=True)): self.assertRaises(SleepException, self.p.run) with patch('patroni.postgresql.Postgresql.data_directory_empty', Mock(return_value=False)): self.assertRaises(SleepException, self.p.run) def test_sigterm_handler(self): self.assertRaises(SystemExit, self.p.sigterm_handler) def test_schedule_next_run(self): self.p.ha.cluster = Mock() self.p.ha.dcs.watch = Mock(return_value=True) self.p.schedule_next_run() self.p.next_run = time.time() - self.p.dcs.loop_wait - 1 self.p.schedule_next_run() def 
test__filter_tags(self): tags = {'noloadbalance': False, 'clonefrom': False, 'nosync': False, 'smth': 'random'} self.assertEqual(self.p._filter_tags(tags), {'smth': 'random'}) tags['clonefrom'] = True tags['smth'] = False self.assertEqual(self.p._filter_tags(tags), {'clonefrom': True, 'smth': False}) tags = {'nofailover': False, 'failover_priority': 0} self.assertEqual(self.p._filter_tags(tags), tags) tags = {'nofailover': True, 'failover_priority': 1} self.assertEqual(self.p._filter_tags(tags), tags) def test_noloadbalance(self): self.p.tags['noloadbalance'] = True self.assertTrue(self.p.noloadbalance) def test_nofailover(self): for (nofailover, failover_priority, expected) in [ # Without any tags, default is False (None, None, False), # Setting `nofailover: True` has precedence (True, 0, True), (True, 1, True), ('False', 1, True), # because we use bool() for the value # Similarly, setting `nofailover: False` has precedence (False, 0, False), (False, 1, False), ('', 0, False), # Only when we have `nofailover: None` should we got based on priority (None, 0, True), (None, 1, False), ]: with self.subTest(nofailover=nofailover, failover_priority=failover_priority, expected=expected): self.p.tags['nofailover'] = nofailover self.p.tags['failover_priority'] = failover_priority self.assertEqual(self.p.nofailover, expected) def test_failover_priority(self): for (nofailover, failover_priority, expected) in [ # Without any tags, default is 1 (None, None, 1), # Setting `nofailover: True` has precedence (value 0) (True, 0, 0), (True, 1, 0), # Setting `nofailover: False` and `failover_priority: None` gives 1 (False, None, 1), # Normal function of failover_priority (None, 0, 0), (None, 1, 1), (None, 2, 2), ]: with self.subTest(nofailover=nofailover, failover_priority=failover_priority, expected=expected): self.p.tags['nofailover'] = nofailover self.p.tags['failover_priority'] = failover_priority self.assertEqual(self.p.failover_priority, expected) def test_replicatefrom(self): 
self.assertIsNone(self.p.replicatefrom) self.p.tags['replicatefrom'] = 'foo' self.assertEqual(self.p.replicatefrom, 'foo') def test_reload_config(self): self.p.reload_config() self.p._get_tags = Mock(side_effect=Exception) self.p.reload_config(local=True) def test_nosync(self): self.p.tags['nosync'] = True self.assertTrue(self.p.nosync) self.p.tags['nosync'] = None self.assertFalse(self.p.nosync) @patch.object(Thread, 'join', Mock()) def test_shutdown(self): self.p.api.shutdown = Mock(side_effect=Exception) self.p.ha.shutdown = Mock(side_effect=Exception) self.p.shutdown() def test_check_psycopg(self): with patch('builtins.__import__', Mock(side_effect=ImportError)): self.assertRaises(SystemExit, check_psycopg) with patch('builtins.__import__', mock_import): self.assertIsNone(check_psycopg()) with patch('builtins.__import__', mock_import2): self.assertRaises(SystemExit, check_psycopg) def test_ensure_unique_name(self): # None/empty cluster implies unique name with patch('patroni.dcs.AbstractDCS.get_cluster', Mock(return_value=None)): self.assertIsNone(self.p.ensure_unique_name()) empty_cluster = Cluster.empty() with patch('patroni.dcs.AbstractDCS.get_cluster', Mock(return_value=empty_cluster)): self.assertIsNone(self.p.ensure_unique_name()) without_members = empty_cluster._asdict() del without_members['members'] # Cluster with members with different names implies unique name okay_cluster = Cluster( members=[Member(version=1, name="distinct", session=1, data={})], **without_members ) with patch('patroni.dcs.AbstractDCS.get_cluster', Mock(return_value=okay_cluster)): self.assertIsNone(self.p.ensure_unique_name()) # Cluster with a member with the same name that is running bad_cluster = Cluster( members=[Member(version=1, name="postgresql0", session=1, data={ "api_url": "https://127.0.0.1:8008", })], **without_members ) with patch('patroni.dcs.AbstractDCS.get_cluster', Mock(return_value=bad_cluster)): # If the api of the running node cannot be reached, this implies 
unique name with patch('urllib3.PoolManager.request', Mock(side_effect=ConnectionError)): self.assertIsNone(self.p.ensure_unique_name()) # Only if the api of the running node is reachable do we throw an error with patch('urllib3.PoolManager.request', Mock()): self.assertRaises(SystemExit, self.p.ensure_unique_name) patroni-3.2.2/tests/test_postgresql.py000066400000000000000000001504701455170150700201640ustar00rootroot00000000000000import datetime import os import psutil import re import subprocess import time from copy import deepcopy from mock import Mock, MagicMock, PropertyMock, patch, mock_open import patroni.psycopg as psycopg from patroni.async_executor import CriticalTask from patroni.collections import CaseInsensitiveSet from patroni.config import GlobalConfig from patroni.dcs import RemoteMember from patroni.exceptions import PostgresConnectionException, PatroniException from patroni.postgresql import Postgresql, STATE_REJECT, STATE_NO_RESPONSE from patroni.postgresql.bootstrap import Bootstrap from patroni.postgresql.callback_executor import CallbackAction from patroni.postgresql.config import _false_validator from patroni.postgresql.postmaster import PostmasterProcess from patroni.postgresql.validator import (ValidatorFactoryNoType, ValidatorFactoryInvalidType, ValidatorFactoryInvalidSpec, ValidatorFactory, InvalidGucValidatorsFile, _get_postgres_guc_validators, _read_postgres_gucs_validators_file, _load_postgres_gucs_validators, Bool, Integer, Real, Enum, EnumBool, String) from patroni.utils import RetryFailedError from threading import Thread, current_thread from . 
import (BaseTestPostgresql, MockCursor, MockPostmaster, psycopg_connect, mock_available_gucs, GET_PG_SETTINGS_RESULT) mtime_ret = {} def mock_mtime(filename): if filename not in mtime_ret: mtime_ret[filename] = time.time() else: mtime_ret[filename] += 1 return mtime_ret[filename] def pg_controldata_string(*args, **kwargs): return b""" pg_control version number: 942 Catalog version number: 201509161 Database system identifier: 6200971513092291716 Database cluster state: shut down in recovery pg_control last modified: Fri Oct 2 10:57:06 2015 Latest checkpoint location: 0/30000C8 Prior checkpoint location: 0/2000060 Latest checkpoint's REDO location: 0/3000090 Latest checkpoint's REDO WAL file: 000000020000000000000003 Latest checkpoint's TimeLineID: 2 Latest checkpoint's PrevTimeLineID: 2 Latest checkpoint's full_page_writes: on Latest checkpoint's NextXID: 0/943 Latest checkpoint's NextOID: 24576 Latest checkpoint's NextMultiXactId: 1 Latest checkpoint's NextMultiOffset: 0 Latest checkpoint's oldestXID: 931 Latest checkpoint's oldestXID's DB: 1 Latest checkpoint's oldestActiveXID: 943 Latest checkpoint's oldestMultiXid: 1 Latest checkpoint's oldestMulti's DB: 1 Latest checkpoint's oldestCommitTs: 0 Latest checkpoint's newestCommitTs: 0 Time of latest checkpoint: Fri Oct 2 10:56:54 2015 Fake LSN counter for unlogged rels: 0/1 Minimum recovery ending location: 0/30241F8 Min recovery ending loc's timeline: 2 Backup start location: 0/0 Backup end location: 0/0 End-of-backup record required: no wal_level setting: hot_standby Current wal_log_hints setting: on Current max_connections setting: 100 Current max_worker_processes setting: 8 Current max_prepared_xacts setting: 0 Current max_locks_per_xact setting: 64 Current track_commit_timestamp setting: off Maximum data alignment: 8 Database block size: 8192 Blocks per segment of large relation: 131072 WAL block size: 8192 Bytes per WAL segment: 16777216 Maximum length of identifiers: 64 Maximum columns in an index: 32 
Maximum size of a TOAST chunk: 1996 Size of a large-object chunk: 2048 Date/time type storage: 64-bit integers Float4 argument passing: by value Float8 argument passing: by value Data page checksum version: 0 """ @patch('subprocess.call', Mock(return_value=0)) @patch('patroni.psycopg.connect', psycopg_connect) @patch.object(Postgresql, 'available_gucs', mock_available_gucs) class TestPostgresql(BaseTestPostgresql): @patch('subprocess.call', Mock(return_value=0)) @patch('os.rename', Mock()) @patch('patroni.postgresql.CallbackExecutor', Mock()) @patch.object(Postgresql, 'get_major_version', Mock(return_value=140000)) @patch.object(Postgresql, 'is_running', Mock(return_value=True)) @patch.object(Postgresql, 'available_gucs', mock_available_gucs) def setUp(self): super(TestPostgresql, self).setUp() self.p.config.write_postgresql_conf() @patch('subprocess.Popen') @patch.object(Postgresql, 'wait_for_startup') @patch.object(Postgresql, 'wait_for_port_open') @patch.object(Postgresql, 'is_running') @patch.object(Postgresql, 'controldata', Mock()) def test_start(self, mock_is_running, mock_wait_for_port_open, mock_wait_for_startup, mock_popen): mock_is_running.return_value = MockPostmaster() mock_wait_for_port_open.return_value = True mock_wait_for_startup.return_value = False mock_popen.return_value.stdout.readline.return_value = '123' self.assertTrue(self.p.start()) mock_is_running.return_value = None with patch.object(Postgresql, 'ensure_major_version_is_known', Mock(return_value=False)): self.assertIsNone(self.p.start()) mock_postmaster = MockPostmaster() with patch.object(PostmasterProcess, 'start', return_value=mock_postmaster): pg_conf = os.path.join(self.p.data_dir, 'postgresql.conf') open(pg_conf, 'w').close() self.assertFalse(self.p.start(task=CriticalTask())) with open(pg_conf) as f: lines = f.readlines() self.assertTrue("f.oo = 'bar'\n" in lines) mock_wait_for_startup.return_value = None self.assertFalse(self.p.start(10)) self.assertIsNone(self.p.start()) 
mock_wait_for_port_open.return_value = False self.assertFalse(self.p.start()) task = CriticalTask() task.cancel() self.assertFalse(self.p.start(task=task)) self.p.cancellable.cancel() self.assertFalse(self.p.start()) with patch('patroni.postgresql.config.ConfigHandler.effective_configuration', PropertyMock(side_effect=Exception)): self.assertIsNone(self.p.start()) @patch.object(Postgresql, 'pg_isready') @patch('patroni.postgresql.polling_loop', Mock(return_value=range(1))) def test_wait_for_port_open(self, mock_pg_isready): mock_pg_isready.return_value = STATE_NO_RESPONSE mock_postmaster = MockPostmaster() mock_postmaster.is_running.return_value = None # No pid file and postmaster death self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1)) mock_postmaster.is_running.return_value = True # timeout self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1)) # pg_isready failure mock_pg_isready.return_value = 'garbage' self.assertTrue(self.p.wait_for_port_open(mock_postmaster, 1)) # cancelled self.p.cancellable.cancel() self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1)) @patch('time.sleep', Mock()) @patch.object(Postgresql, 'is_running') @patch.object(Postgresql, '_wait_for_connection_close', Mock()) @patch('patroni.postgresql.cancellable.CancellableSubprocess.call') def test_stop(self, mock_cancellable_call, mock_is_running): # Postmaster is not running mock_callback = Mock() mock_is_running.return_value = None self.assertTrue(self.p.stop(on_safepoint=mock_callback)) mock_callback.assert_called() # Is running, stopped successfully mock_is_running.return_value = mock_postmaster = MockPostmaster() mock_callback.reset_mock() self.assertTrue(self.p.stop(on_safepoint=mock_callback)) mock_callback.assert_called() mock_postmaster.signal_stop.assert_called() # Timed out waiting for fast shutdown triggers immediate shutdown mock_postmaster.wait.side_effect = [psutil.TimeoutExpired(30), psutil.TimeoutExpired(30), Mock()] mock_callback.reset_mock() 
self.assertTrue(self.p.stop(on_safepoint=mock_callback, stop_timeout=30)) mock_callback.assert_called() mock_postmaster.signal_stop.assert_called() # Immediate shutdown succeeded mock_postmaster.wait.side_effect = [psutil.TimeoutExpired(30), Mock()] self.assertTrue(self.p.stop(on_safepoint=mock_callback, stop_timeout=30)) # Ensure before_stop script is called when configured to self.p.config._config['before_stop'] = ':' mock_postmaster.wait.side_effect = [psutil.TimeoutExpired(30), Mock()] mock_cancellable_call.return_value = 0 with patch('patroni.postgresql.logger.info') as mock_logger: self.p.stop(on_safepoint=mock_callback, stop_timeout=30) self.assertEqual(mock_logger.call_args[0], ('before_stop script `%s` exited with %s', ':', 0)) mock_postmaster.wait.side_effect = [psutil.TimeoutExpired(30), Mock()] mock_cancellable_call.side_effect = Exception with patch('patroni.postgresql.logger.error') as mock_logger: self.p.stop(on_safepoint=mock_callback, stop_timeout=30) self.assertEqual(mock_logger.call_args_list[1][0][0], 'Exception when calling `%s`: %r') # Stop signal failed mock_postmaster.signal_stop.return_value = False self.assertFalse(self.p.stop()) # Stop signal failed to find process mock_postmaster.signal_stop.return_value = True mock_callback.reset_mock() self.assertTrue(self.p.stop(on_safepoint=mock_callback)) mock_callback.assert_called() # Fast shutdown is timed out but when immediate postmaster is already gone mock_postmaster.wait.side_effect = [psutil.TimeoutExpired(30), Mock()] mock_postmaster.signal_stop.side_effect = [None, True] self.assertTrue(self.p.stop(on_safepoint=mock_callback, stop_timeout=30)) @patch('time.sleep', Mock()) @patch.object(Postgresql, 'is_running', MockPostmaster) @patch.object(Postgresql, '_wait_for_connection_close', Mock()) @patch.object(Postgresql, 'latest_checkpoint_location', Mock(return_value='7')) def test__do_stop(self): mock_callback = Mock() with patch.object(Postgresql, 'controldata', Mock(return_value={'Database 
cluster state': 'shut down', "Latest checkpoint's TimeLineID": '1', 'Latest checkpoint location': '1/1'})): self.assertTrue(self.p.stop(on_shutdown=mock_callback, stop_timeout=3)) mock_callback.assert_called() with patch.object(Postgresql, 'controldata', Mock(return_value={'Database cluster state': 'shut down in recovery'})): self.assertTrue(self.p.stop(on_shutdown=mock_callback, stop_timeout=3)) with patch.object(Postgresql, 'controldata', Mock(return_value={'Database cluster state': 'shutting down'})): self.assertTrue(self.p.stop(on_shutdown=mock_callback, stop_timeout=3)) def test_restart(self): self.p.start = Mock(return_value=False) self.assertFalse(self.p.restart()) self.assertEqual(self.p.state, 'restart failed (restarting)') @patch('os.chmod', Mock()) @patch('builtins.open', MagicMock()) def test_write_pgpass(self): self.p.config.write_pgpass({'host': 'localhost', 'port': '5432', 'user': 'foo'}) self.p.config.write_pgpass({'host': 'localhost', 'port': '5432', 'user': 'foo', 'password': 'bar'}) def test_checkpoint(self): with patch.object(MockCursor, 'fetchone', Mock(return_value=(True, ))): self.assertEqual(self.p.checkpoint({'user': 'postgres'}), 'is_in_recovery=true') with patch.object(MockCursor, 'execute', Mock(return_value=None)): self.assertIsNone(self.p.checkpoint()) self.assertEqual(self.p.checkpoint(timeout=10), 'not accessible or not healty') @patch('patroni.postgresql.config.mtime', mock_mtime) @patch('patroni.postgresql.config.ConfigHandler._get_pg_settings') def test_check_recovery_conf(self, mock_get_pg_settings): self.p.call_nowait(CallbackAction.ON_START) mock_get_pg_settings.return_value = { 'primary_conninfo': ['primary_conninfo', 'foo=', None, 'string', 'postmaster', self.p.config._auto_conf], 'recovery_min_apply_delay': ['recovery_min_apply_delay', '0', 'ms', 'integer', 'sighup', 'foo'] } self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) self.p.config.write_recovery_conf({'standby_mode': 'on'}) 
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) mock_get_pg_settings.return_value['primary_conninfo'][1] = '' mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '1' self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) mock_get_pg_settings.return_value['recovery_min_apply_delay'][5] = self.p.config._auto_conf self.assertEqual(self.p.config.check_recovery_conf(None), (True, False)) mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '0' self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) conninfo = {'host': '1', 'password': 'bar'} with patch('patroni.postgresql.config.ConfigHandler.primary_conninfo_params', Mock(return_value=conninfo)): mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '1' self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) mock_get_pg_settings.return_value['primary_conninfo'][1] = 'host=1 target_session_attrs=read-write'\ + ' passfile=' + re.sub(r'([\'\\ ])', r'\\\1', self.p.config._pgpass) mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '0' self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) self.p.config.write_recovery_conf({'standby_mode': 'on', 'primary_conninfo': conninfo.copy()}) self.p.config.write_postgresql_conf() self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) with patch.object(Postgresql, 'primary_conninfo', Mock(return_value='host=1')): mock_get_pg_settings.return_value['primary_slot_name'] = [ 'primary_slot_name', '', '', 'string', 'postmaster', self.p.config._postgresql_conf] self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) @patch.object(Postgresql, 'major_version', PropertyMock(return_value=120000)) @patch.object(Postgresql, 'is_running', MockPostmaster) @patch.object(MockPostmaster, 'create_time', Mock(return_value=1234567), create=True) @patch('patroni.postgresql.config.ConfigHandler._get_pg_settings') def 
test__read_recovery_params(self, mock_get_pg_settings): self.p.call_nowait(CallbackAction.ON_START) mock_get_pg_settings.return_value = {'primary_conninfo': ['primary_conninfo', '', None, 'string', 'postmaster', self.p.config._postgresql_conf]} self.p.config.write_recovery_conf({'standby_mode': 'on', 'primary_conninfo': {'password': 'foo'}}) self.p.config.write_postgresql_conf() self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) # Config files changed, but can't connect to postgres mock_get_pg_settings.side_effect = PostgresConnectionException('') with patch('patroni.postgresql.config.mtime', mock_mtime): self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) # Config files didn't change, but postgres crashed or in crash recovery with patch.object(MockPostmaster, 'create_time', Mock(return_value=1234568), create=True): self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) # Any other exception raised when executing the query mock_get_pg_settings.side_effect = Exception with patch('patroni.postgresql.config.mtime', mock_mtime): self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) with patch.object(Postgresql, 'is_starting', Mock(return_value=True)): self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) @patch.object(Postgresql, 'major_version', PropertyMock(return_value=100000)) @patch.object(Postgresql, 'primary_conninfo', Mock(return_value='host=1')) def test__read_recovery_params_pre_v12(self): self.p.config.write_recovery_conf({'standby_mode': 'off', 'primary_conninfo': {'password': 'foo'}}) self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) self.p.config.write_recovery_conf({'restore_command': '\n'}) with patch('patroni.postgresql.config.mtime', mock_mtime): 
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) def test_write_postgresql_and_sanitize_auto_conf(self): read_data = 'primary_conninfo = foo\nfoo = bar\n' with open(os.path.join(self.p.data_dir, 'postgresql.auto.conf'), 'w') as f: f.write(read_data) mock_read_auto = mock_open(read_data=read_data) mock_read_auto.return_value.__iter__ = lambda o: iter(o.readline, '') with patch('builtins.open', Mock(side_effect=[mock_open()(), mock_read_auto(), IOError])), \ patch('os.chmod', Mock()): self.p.config.write_postgresql_conf() with patch('builtins.open', Mock(side_effect=[mock_open()(), IOError])), patch('os.chmod', Mock()): self.p.config.write_postgresql_conf() self.p.config.write_recovery_conf({'foo': 'bar'}) self.p.config.write_postgresql_conf() @patch.object(Postgresql, 'is_running', Mock(return_value=False)) @patch.object(Postgresql, 'start', Mock()) def test_follow(self): self.p.call_nowait(CallbackAction.ON_START) m = RemoteMember('1', {'restore_command': '2', 'primary_slot_name': 'foo', 'conn_kwargs': {'host': 'bar'}}) self.p.follow(m) with patch.object(Postgresql, 'ensure_major_version_is_known', Mock(return_value=False)): self.assertIsNone(self.p.follow(m)) @patch.object(MockCursor, 'execute', Mock(side_effect=psycopg.OperationalError)) def test__query(self): self.assertRaises(PostgresConnectionException, self.p._query, 'blabla') self.p._state = 'restarting' self.assertRaises(RetryFailedError, self.p._query, 'blabla') def test_query(self): self.p.query('select 1') self.assertRaises(PostgresConnectionException, self.p.query, 'RetryFailedError') self.assertRaises(psycopg.ProgrammingError, self.p.query, 'blabla') @patch.object(Postgresql, 'pg_isready', Mock(return_value=STATE_REJECT)) def test_is_primary(self): self.assertTrue(self.p.is_primary()) self.p.reset_cluster_info_state(None) with patch.object(Postgresql, '_query', Mock(side_effect=RetryFailedError(''))): self.assertFalse(self.p.is_primary()) @patch.object(Postgresql, 'controldata', 
Mock(return_value={'Database cluster state': 'shut down', 'Latest checkpoint location': '0/1ADBC18', "Latest checkpoint's TimeLineID": '1'})) @patch('subprocess.Popen') def test_latest_checkpoint_location(self, mock_popen): mock_popen.return_value.communicate.return_value = (None, None) self.assertEqual(self.p.latest_checkpoint_location(), 28163096) with patch.object(Postgresql, 'controldata', Mock(return_value={'Database cluster state': 'shut down', 'Latest checkpoint location': 'k/1ADBC18', "Latest checkpoint's TimeLineID": '1'})): self.assertIsNone(self.p.latest_checkpoint_location()) # 9.3 and 9.4 format mock_popen.return_value.communicate.side_effect = [ (b'rmgr: XLOG len (rec/tot): 72/ 104, tx: 0, lsn: 0/01ADBC18, prev 0/01ADBBB8, ' + b'bkp: 0000, desc: checkpoint: redo 0/1ADBC18; tli 1; prev tli 1; fpw true; xid 0/727; oid 16386; multi' + b' 1; offset 0; oldest xid 715 in DB 1; oldest multi 1 in DB 1; oldest running xid 0; shutdown', None), (b'rmgr: Transaction len (rec/tot): 64/ 96, tx: 726, lsn: 0/01ADBBB8, prev 0/01ADBB70, ' + b'bkp: 0000, desc: commit: 2021-02-26 11:19:37.900918 CET; inval msgs: catcache 11 catcache 10', None)] self.assertEqual(self.p.latest_checkpoint_location(), 28163096) mock_popen.return_value.communicate.side_effect = [ (b'rmgr: XLOG len (rec/tot): 72/ 104, tx: 0, lsn: 0/01ADBC18, prev 0/01ADBBB8, ' + b'bkp: 0000, desc: checkpoint: redo 0/1ADBC18; tli 1; prev tli 1; fpw true; xid 0/727; oid 16386; multi' + b' 1; offset 0; oldest xid 715 in DB 1; oldest multi 1 in DB 1; oldest running xid 0; shutdown', None), (b'rmgr: XLOG len (rec/tot): 0/ 32, tx: 0, lsn: 0/01ADBBB8, prev 0/01ADBBA0, ' + b'bkp: 0000, desc: xlog switch ', None)] self.assertEqual(self.p.latest_checkpoint_location(), 28163000) # 9.5+ format mock_popen.return_value.communicate.side_effect = [ (b'rmgr: XLOG len (rec/tot): 114/ 114, tx: 0, lsn: 0/01ADBC18, prev 0/018260F8, ' + b'desc: CHECKPOINT_SHUTDOWN redo 0/1825ED8; tli 1; prev tli 1; fpw true; xid 0:494; oid 16387; 
multi 1' + b'; offset 0; oldest xid 479 in DB 1; oldest multi 1 in DB 1; oldest/newest commit timestamp xid: 0/0;' + b' oldest running xid 0; shutdown', None), (b'rmgr: XLOG len (rec/tot): 24/ 24, tx: 0, lsn: 0/018260F8, prev 0/01826080, ' + b'desc: SWITCH ', None)] self.assertEqual(self.p.latest_checkpoint_location(), 25321720) def test_reload(self): self.assertTrue(self.p.reload()) @patch.object(Postgresql, 'is_running') def test_is_healthy(self, mock_is_running): mock_is_running.return_value = True self.assertTrue(self.p.is_healthy()) mock_is_running.return_value = False self.assertFalse(self.p.is_healthy()) @patch('psutil.Popen') def test_promote(self, mock_popen): mock_popen.return_value.wait.return_value = 0 task = CriticalTask() self.assertTrue(self.p.promote(0, task)) self.p.set_role('replica') self.p.config._config['pre_promote'] = 'test' with patch('patroni.postgresql.cancellable.CancellableSubprocess.is_cancelled', PropertyMock(return_value=1)): self.assertFalse(self.p.promote(0, task)) mock_popen.side_effect = Exception self.assertFalse(self.p.promote(0, task)) task.reset() task.cancel() self.assertFalse(self.p.promote(0, task)) def test_timeline_wal_position(self): self.assertEqual(self.p.timeline_wal_position(), (1, 2, 1)) Thread(target=self.p.timeline_wal_position).start() @patch.object(PostmasterProcess, 'from_pidfile') def test_is_running(self, mock_frompidfile): # Cached postmaster running mock_postmaster = self.p._postmaster_proc = MockPostmaster() self.assertEqual(self.p.is_running(), mock_postmaster) # Cached postmaster not running, no postmaster running mock_postmaster.is_running.return_value = False mock_frompidfile.return_value = None self.assertEqual(self.p.is_running(), None) self.assertEqual(self.p._postmaster_proc, None) # No cached postmaster, postmaster running mock_frompidfile.return_value = mock_postmaster2 = MockPostmaster() self.assertEqual(self.p.is_running(), mock_postmaster2) self.assertEqual(self.p._postmaster_proc, 
mock_postmaster2) @patch('shlex.split', Mock(side_effect=OSError)) def test_call_nowait(self): self.p.set_role('replica') self.assertIsNone(self.p.call_nowait(CallbackAction.ON_START)) self.p.bootstrapping = True self.assertIsNone(self.p.call_nowait(CallbackAction.ON_START)) @patch.object(Postgresql, 'is_running', Mock(return_value=MockPostmaster())) def test_is_primary_exception(self): self.p.start() self.p.query = Mock(side_effect=psycopg.OperationalError("not supported")) self.assertTrue(self.p.stop()) @patch('os.rename', Mock()) @patch('os.path.exists', Mock(return_value=True)) @patch('shutil.rmtree', Mock()) @patch('os.path.isdir', Mock(return_value=True)) @patch('os.unlink', Mock()) @patch('os.symlink', Mock()) @patch('patroni.postgresql.Postgresql.pg_wal_realpath', Mock(return_value={'pg_wal': '/mnt/pg_wal'})) @patch('patroni.postgresql.Postgresql.pg_tblspc_realpaths', Mock(return_value={'42': '/mnt/tablespaces/archive'})) def test_move_data_directory(self): self.p.move_data_directory() with patch('os.rename', Mock(side_effect=OSError)): self.p.move_data_directory() @patch('os.listdir', Mock(return_value=['recovery.conf'])) @patch('os.path.exists', Mock(return_value=True)) @patch.object(Postgresql, 'controldata', Mock()) def test_get_postgres_role_from_data_directory(self): self.assertEqual(self.p.get_postgres_role_from_data_directory(), 'replica') @patch('os.remove', Mock()) @patch('shutil.rmtree', Mock()) @patch('os.unlink', Mock(side_effect=OSError)) @patch('os.path.isdir', Mock(return_value=True)) @patch('os.path.exists', Mock(return_value=True)) def test_remove_data_directory(self): with patch('os.path.islink', Mock(return_value=True)): self.p.remove_data_directory() with patch('os.path.isfile', Mock(return_value=True)): self.p.remove_data_directory() with patch('os.path.islink', Mock(side_effect=[False, False, True, True])), \ patch('os.listdir', Mock(return_value=['12345'])), \ patch('os.path.realpath', Mock(side_effect=['../foo', '../foo_tsp'])): 
self.p.remove_data_directory() @patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True)) def test_controldata(self): with patch('subprocess.check_output', Mock(return_value=0, side_effect=pg_controldata_string)): data = self.p.controldata() self.assertEqual(len(data), 50) self.assertEqual(data['Database cluster state'], 'shut down in recovery') self.assertEqual(data['wal_log_hints setting'], 'on') self.assertEqual(int(data['Database block size']), 8192) with patch('subprocess.check_output', Mock(side_effect=subprocess.CalledProcessError(1, ''))): self.assertEqual(self.p.controldata(), {}) @patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True)) @patch('subprocess.check_output', MagicMock(return_value=0, side_effect=pg_controldata_string)) def test_sysid(self): self.assertEqual(self.p.sysid, "6200971513092291716") @patch('os.path.isfile', Mock(return_value=True)) @patch('shutil.copy', Mock(side_effect=IOError)) def test_save_configuration_files(self): self.p.config.save_configuration_files() @patch('os.path.isfile', Mock(side_effect=[False, True, False, True])) @patch('shutil.copy', Mock(side_effect=[None, IOError])) @patch('os.chmod', Mock()) def test_restore_configuration_files(self): self.p.config.restore_configuration_files() def test_can_create_replica_without_replication_connection(self): self.p.config._config['create_replica_method'] = [] self.assertFalse(self.p.can_create_replica_without_replication_connection(None)) self.p.config._config['create_replica_method'] = ['wale', 'basebackup'] self.p.config._config['wale'] = {'command': 'foo', 'no_leader': 1} self.assertTrue(self.p.can_create_replica_without_replication_connection(None)) def test_replica_method_can_work_without_replication_connection(self): self.assertFalse(self.p.replica_method_can_work_without_replication_connection('basebackup')) self.assertFalse(self.p.replica_method_can_work_without_replication_connection('foobar')) 
self.p.config._config['foo'] = {'command': 'bar', 'no_leader': 1} self.assertTrue(self.p.replica_method_can_work_without_replication_connection('foo')) self.p.config._config['foo'] = {'command': 'bar'} self.assertFalse(self.p.replica_method_can_work_without_replication_connection('foo')) @patch('time.sleep', Mock()) @patch.object(Postgresql, 'is_running', Mock(return_value=True)) @patch('patroni.postgresql.config.logger.info') @patch('patroni.postgresql.config.logger.warning') def test_reload_config(self, mock_warning, mock_info): config = deepcopy(self.p.config._config) # Nothing changed self.p.reload_config(config) mock_info.assert_called_once_with('No PostgreSQL configuration items changed, nothing to reload.') mock_warning.assert_not_called() self.assertEqual(self.p.pending_restart, False) mock_info.reset_mock() # Ignored params changed config['parameters']['archive_cleanup_command'] = 'blabla' self.p.reload_config(config) mock_info.assert_called_once_with('No PostgreSQL configuration items changed, nothing to reload.') self.assertEqual(self.p.pending_restart, False) mock_info.reset_mock() # Handle wal_buffers self.p.config._config['parameters']['wal_buffers'] = '512' self.p.reload_config(config) mock_info.assert_called_once_with('No PostgreSQL configuration items changed, nothing to reload.') self.assertEqual(self.p.pending_restart, False) mock_info.reset_mock() config = deepcopy(self.p.config._config) # hba/ident_changed config['pg_hba'] = [''] config['pg_ident'] = [''] self.p.reload_config(config) mock_info.assert_called_once_with('Reloading PostgreSQL configuration.') self.assertEqual(self.p.pending_restart, False) mock_info.reset_mock() # Postmaster parameter change (pending_restart) init_max_worker_processes = config['parameters']['max_worker_processes'] config['parameters']['max_worker_processes'] *= 2 with patch('patroni.postgresql.Postgresql._query', Mock(side_effect=[GET_PG_SETTINGS_RESULT, [(1,)]])): self.p.reload_config(config) 
self.assertEqual(mock_info.call_args_list[0][0], ('Changed %s from %s to %s (restart might be required)', 'max_worker_processes', str(init_max_worker_processes), config['parameters']['max_worker_processes'])) self.assertEqual(mock_info.call_args_list[1][0], ('Reloading PostgreSQL configuration.',)) self.assertEqual(self.p.pending_restart, True) mock_info.reset_mock() # Reset to the initial value without restart config['parameters']['max_worker_processes'] = init_max_worker_processes self.p.reload_config(config) self.assertEqual(mock_info.call_args_list[0][0], ('Changed %s from %s to %s', 'max_worker_processes', init_max_worker_processes * 2, str(config['parameters']['max_worker_processes']))) self.assertEqual(mock_info.call_args_list[1][0], ('Reloading PostgreSQL configuration.',)) self.assertEqual(self.p.pending_restart, False) mock_info.reset_mock() # User-defined parameter changed (removed) config['parameters'].pop('f.oo') self.p.reload_config(config) self.assertEqual(mock_info.call_args_list[0][0], ('Changed %s from %s to %s', 'f.oo', 'bar', None)) self.assertEqual(mock_info.call_args_list[1][0], ('Reloading PostgreSQL configuration.',)) self.assertEqual(self.p.pending_restart, False) mock_info.reset_mock() # Non-postmaster parameter change config['parameters']['autovacuum'] = 'off' self.p.reload_config(config) self.assertEqual(mock_info.call_args_list[0][0], ("Changed %s from %s to %s", 'autovacuum', 'on', 'off')) self.assertEqual(mock_info.call_args_list[1][0], ('Reloading PostgreSQL configuration.',)) self.assertEqual(self.p.pending_restart, False) config['parameters']['autovacuum'] = 'on' mock_info.reset_mock() # Remove invalid parameter config['parameters']['invalid'] = 'value' self.p.reload_config(config) self.assertEqual(mock_warning.call_args_list[0][0], ('Removing invalid parameter `%s` from postgresql.parameters', 'invalid')) config['parameters'].pop('invalid') mock_warning.reset_mock() mock_info.reset_mock() # Non-empty result (outside changes) and 
exception while querying pending_restart parameters with patch('patroni.postgresql.Postgresql._query', Mock(side_effect=[GET_PG_SETTINGS_RESULT, [(1,)], GET_PG_SETTINGS_RESULT, Exception])): self.p.reload_config(config, True) self.assertEqual(mock_info.call_args_list[0][0], ('Reloading PostgreSQL configuration.',)) self.assertEqual(self.p.pending_restart, True) # Invalid values, just to increase silly coverage in postgresql.validator. # One day we will have proper tests there. config['parameters']['autovacuum'] = 'of' # Bool.transform() config['parameters']['vacuum_cost_limit'] = 'smth' # Number.transform() self.p.reload_config(config, True) self.assertEqual(mock_warning.call_args_list[-1][0][0], 'Exception %r when running query') def test_resolve_connection_addresses(self): self.p.config._config['use_unix_socket'] = self.p.config._config['use_unix_socket_repl'] = True self.p.config.resolve_connection_addresses() self.assertEqual(self.p.config.local_replication_address, {'host': '/tmp', 'port': '5432'}) self.p.config._server_parameters.pop('unix_socket_directories') self.p.config.resolve_connection_addresses() self.assertEqual(self.p.connection_pool.conn_kwargs, {'connect_timeout': 3, 'dbname': 'postgres', 'fallback_application_name': 'Patroni', 'options': '-c statement_timeout=2000', 'password': 'test', 'port': '5432', 'user': 'foo'}) @patch.object(Postgresql, '_version_file_exists', Mock(return_value=True)) def test_get_major_version(self): with patch('builtins.open', mock_open(read_data='9.4')): self.assertEqual(self.p.get_major_version(), 90400) with patch('builtins.open', Mock(side_effect=Exception)): self.assertEqual(self.p.get_major_version(), 0) def test_postmaster_start_time(self): now = datetime.datetime.now() with patch.object(MockCursor, "fetchall", Mock(return_value=[(now, True, '', '', '', '', False)])): self.assertEqual(self.p.postmaster_start_time(), now.isoformat(sep=' ')) t = Thread(target=self.p.postmaster_start_time) t.start() t.join() with 
patch.object(MockCursor, "execute", side_effect=psycopg.Error): self.assertIsNone(self.p.postmaster_start_time()) def test_check_for_startup(self): with patch('subprocess.call', return_value=0): self.p._state = 'starting' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'running') with patch('subprocess.call', return_value=1): self.p._state = 'starting' self.assertTrue(self.p.check_for_startup()) self.assertEqual(self.p.state, 'starting') with patch('subprocess.call', return_value=2): self.p._state = 'starting' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'start failed') with patch('subprocess.call', return_value=0): self.p._state = 'running' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'running') with patch('subprocess.call', return_value=127): self.p._state = 'running' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'running') self.p._state = 'starting' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'running') def test_wait_for_startup(self): state = {'sleeps': 0, 'num_rejects': 0, 'final_return': 0} self.__thread_ident = current_thread().ident def increment_sleeps(*args): if current_thread().ident == self.__thread_ident: print("Sleep") state['sleeps'] += 1 def isready_return(*args): ret = 1 if state['sleeps'] < state['num_rejects'] else state['final_return'] print("Isready {0} {1}".format(ret, state)) return ret def time_in_state(*args): return state['sleeps'] with patch('subprocess.call', side_effect=isready_return): with patch('time.sleep', side_effect=increment_sleeps): self.p.time_in_state = Mock(side_effect=time_in_state) self.p._state = 'stopped' self.assertTrue(self.p.wait_for_startup()) self.assertEqual(state['sleeps'], 0) self.p._state = 'starting' state['num_rejects'] = 5 self.assertTrue(self.p.wait_for_startup()) self.assertEqual(state['sleeps'], 5) self.p._state = 'starting' state['sleeps'] = 0 
state['final_return'] = 2 self.assertFalse(self.p.wait_for_startup()) self.p._state = 'starting' state['sleeps'] = 0 state['final_return'] = 0 self.assertFalse(self.p.wait_for_startup(timeout=2)) self.assertEqual(state['sleeps'], 3) with patch.object(Postgresql, 'check_startup_state_changed', Mock(return_value=False)): self.p.cancellable.cancel() self.p._state = 'starting' self.assertIsNone(self.p.wait_for_startup()) def test_get_server_parameters(self): config = {'parameters': {'wal_level': 'hot_standby', 'max_prepared_transactions': 100}, 'listen': '0'} self.p._global_config = GlobalConfig({'synchronous_mode': True}) self.p.config.get_server_parameters(config) self.p._global_config = GlobalConfig({'synchronous_mode': True, 'synchronous_mode_strict': True}) self.p.config.get_server_parameters(config) self.p.config.set_synchronous_standby_names('foo') self.assertTrue(str(self.p.config.get_server_parameters(config)).startswith('= times: pass else: scope['times'] += 1 raise PatroniException('Failed!') return inner def test_reset(self): retry = Retry(delay=0, max_tries=2) retry(self._fail()) self.assertEqual(retry._attempts, 1) retry.reset() self.assertEqual(retry._attempts, 0) def test_too_many_tries(self): retry = Retry(delay=0) self.assertRaises(RetryFailedError, retry, self._fail(times=999)) self.assertEqual(retry._attempts, 1) def test_maximum_delay(self): retry = Retry(delay=10, max_tries=100) retry(self._fail(times=10)) self.assertTrue(retry._cur_delay < 4000, retry._cur_delay) # gevent's sleep function is picky about the type self.assertEqual(type(retry._cur_delay), float) def test_deadline(self): retry = Retry(deadline=0.0001) self.assertRaises(RetryFailedError, retry, self._fail(times=100)) def test_copy(self): def _sleep(t): pass retry = Retry(sleep_func=_sleep) rcopy = retry.copy() self.assertTrue(rcopy.sleep_func is _sleep) patroni-3.2.2/tests/test_validator.py000066400000000000000000000334061455170150700177450ustar00rootroot00000000000000import copy 
import os import socket import tempfile import unittest from io import StringIO from mock import Mock, patch, mock_open from patroni.dcs import dcs_modules from patroni.validator import schema, Directory, Schema available_dcs = [m.split(".")[-1] for m in dcs_modules()] config = { "name": "string", "scope": "string", "restapi": { "listen": "127.0.0.2:800", "connect_address": "127.0.0.2:800", "verify_client": 'none' }, "bootstrap": { "dcs": { "ttl": 1000, "loop_wait": 1000, "retry_timeout": 1000, "maximum_lag_on_failover": 1000 }, "initdb": ["string", {"key": "value"}] }, "consul": { "host": "127.0.0.1:5000" }, "etcd": { "hosts": "127.0.0.1:2379,127.0.0.1:2380" }, "etcd3": { "url": "https://127.0.0.1:2379" }, "exhibitor": { "hosts": ["string"], "port": 4000, "pool_interval": 1000 }, "raft": { "self_addr": "127.0.0.1:2222", "bind_addr": "0.0.0.0:2222", "partner_addrs": ["127.0.0.1:2223", "127.0.0.1:2224"], "data_dir": "/", "password": "12345" }, "zookeeper": { "hosts": "127.0.0.1:3379,127.0.0.1:3380" }, "kubernetes": { "namespace": "string", "labels": {}, "scope_label": "string", "role_label": "string", "use_endpoints": False, "pod_ip": "127.0.0.1", "ports": [{"name": "string", "port": 1000}], "retriable_http_codes": [401], }, "postgresql": { "listen": "127.0.0.2,::1:543", "connect_address": "127.0.0.2:543", "proxy_address": "127.0.0.2:5433", "authentication": { "replication": {"username": "user"}, "superuser": {"username": "user"}, "rewind": {"username": "user"}, }, "data_dir": os.path.join(tempfile.gettempdir(), "data_dir"), "bin_dir": os.path.join(tempfile.gettempdir(), "bin_dir"), "parameters": { "unix_socket_directories": "." 
}, "pg_hba": [u"string"], "pg_ident": ["string"], "pg_ctl_timeout": 1000, "use_pg_rewind": False }, "watchdog": { "mode": "off", "device": "string" }, "tags": { "nofailover": False, "clonefrom": False, "noloadbalance": False, "nosync": False } } config_2 = { "some_dir": "very_interesting_dir" } schema2 = Schema({ "some_dir": Directory(contains=["very_interesting_subdir", "another_interesting_subdir"]) }) required_binaries = ["pg_ctl", "initdb", "pg_controldata", "pg_basebackup", "postgres", "pg_isready"] directories = [] files = [] binaries = [] def isfile_side_effect(arg): return arg in files def which_side_effect(arg, path=None): binary = arg if path is None else os.path.join(path, arg) return arg if binary in binaries else None def isdir_side_effect(arg): return arg in directories def exists_side_effect(arg): return isfile_side_effect(arg) or isdir_side_effect(arg) def connect_side_effect(host_port): _, port = host_port if port < 1000: return 1 elif port < 10000: return 0 else: raise socket.gaierror() def mock_getaddrinfo(host, port, *args): if port is None or port == "": port = 0 port = int(port) if port not in range(0, 65536): raise socket.gaierror() if host == "127.0.0.1" or host == "" or host is None: return [(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, '', ('127.0.0.1', port))] elif host == "127.0.0.2": return [(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, '', ('127.0.0.2', port))] elif host == "::1": return [(socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, '', ('::1', port, 0, 0))] else: raise socket.gaierror() def parse_output(output): result = [] for s in output.split("\n"): x = s.split(" ")[0] if x and x not in result: result.append(x) result.sort() return result @patch('socket.socket.connect_ex', Mock(side_effect=connect_side_effect)) @patch('socket.getaddrinfo', Mock(side_effect=mock_getaddrinfo)) @patch('os.path.exists', Mock(side_effect=exists_side_effect)) @patch('os.path.isdir', Mock(side_effect=isdir_side_effect)) 
@patch('os.path.isfile', Mock(side_effect=isfile_side_effect)) @patch('shutil.which', Mock(side_effect=which_side_effect)) @patch('sys.stderr', new_callable=StringIO) @patch('sys.stdout', new_callable=StringIO) class TestValidator(unittest.TestCase): def setUp(self): del files[:] del directories[:] del binaries[:] def test_empty_config(self, mock_out, mock_err): errors = schema({}) output = "\n".join(errors) expected = list(sorted(['name', 'postgresql', 'restapi', 'scope'] + available_dcs)) self.assertEqual(expected, parse_output(output)) def test_complete_config(self, mock_out, mock_err): errors = schema(config) output = "\n".join(errors) self.assertEqual(['postgresql.bin_dir', 'raft.bind_addr', 'raft.self_addr'], parse_output(output)) def test_bin_dir_is_file(self, mock_out, mock_err): files.append(config["postgresql"]["data_dir"]) files.append(config["postgresql"]["bin_dir"]) c = copy.deepcopy(config) c["restapi"]["connect_address"] = 'False:blabla' c["postgresql"]["listen"] = '*:543' c["etcd"]["hosts"] = ["127.0.0.1:2379", "1244.0.0.1:2379", "127.0.0.1:invalidport"] c["kubernetes"]["pod_ip"] = "127.0.0.1111" errors = schema(c) output = "\n".join(errors) self.assertEqual(['etcd.hosts.1', 'etcd.hosts.2', 'kubernetes.pod_ip', 'postgresql.bin_dir', 'postgresql.data_dir', 'raft.bind_addr', 'raft.self_addr', 'restapi.connect_address'], parse_output(output)) @patch('socket.inet_pton', Mock(), create=True) def test_bin_dir_is_empty(self, mock_out, mock_err): directories.append(config["postgresql"]["data_dir"]) directories.append(config["postgresql"]["bin_dir"]) files.append(os.path.join(config["postgresql"]["data_dir"], "global", "pg_control")) c = copy.deepcopy(config) c["restapi"]["connect_address"] = "127.0.0.1:8008" c["kubernetes"]["pod_ip"] = "::1" c["consul"]["host"] = "127.0.0.1:50000" c["etcd"]["host"] = "127.0.0.1:237" c["postgresql"]["listen"] = "127.0.0.1:5432" with patch('patroni.validator.open', mock_open(read_data='9')): errors = schema(c) output = 
"\n".join(errors) self.assertEqual(['consul.host', 'etcd.host', 'postgresql.bin_dir', 'postgresql.data_dir', 'postgresql.listen', 'raft.bind_addr', 'raft.self_addr', 'restapi.connect_address'], parse_output(output)) def test_bin_dir_is_empty_string_excutables_in_path(self, mock_out, mock_err): binaries.extend(required_binaries) c = copy.deepcopy(config) c["postgresql"]["bin_dir"] = "" errors = schema(c) output = "\n".join(errors) self.assertEqual(['raft.bind_addr', 'raft.self_addr'], parse_output(output)) @patch('subprocess.check_output', Mock(return_value=b"postgres (PostgreSQL) 12.1")) def test_data_dir_contains_pg_version(self, mock_out, mock_err): directories.append(config["postgresql"]["data_dir"]) directories.append(config["postgresql"]["bin_dir"]) directories.append(os.path.join(config["postgresql"]["data_dir"], "pg_wal")) files.append(os.path.join(config["postgresql"]["data_dir"], "global", "pg_control")) files.append(os.path.join(config["postgresql"]["data_dir"], "PG_VERSION")) binaries.extend(required_binaries) c = copy.deepcopy(config) c["postgresql"]["bin_dir"] = "" # to cover postgres --version call from PATH with patch('patroni.validator.open', mock_open(read_data='12')): errors = schema(c) output = "\n".join(errors) self.assertEqual(['raft.bind_addr', 'raft.self_addr'], parse_output(output)) @patch('subprocess.check_output', Mock(return_value=b"postgres (PostgreSQL) 12.1")) def test_pg_version_missmatch(self, mock_out, mock_err): directories.append(config["postgresql"]["data_dir"]) directories.append(config["postgresql"]["bin_dir"]) directories.append(os.path.join(config["postgresql"]["data_dir"], "pg_wal")) files.append(os.path.join(config["postgresql"]["data_dir"], "global", "pg_control")) files.append(os.path.join(config["postgresql"]["data_dir"], "PG_VERSION")) binaries.extend([os.path.join(config["postgresql"]["bin_dir"], i) for i in required_binaries]) c = copy.deepcopy(config) c["etcd"]["hosts"] = [] c["postgresql"]["listen"] = 
'127.0.0.2,*:543' with patch('patroni.validator.open', mock_open(read_data='11')): errors = schema(c) output = "\n".join(errors) self.assertEqual(['etcd.hosts', 'postgresql.data_dir', 'postgresql.listen', 'raft.bind_addr', 'raft.self_addr'], parse_output(output)) @patch('subprocess.check_output', Mock(return_value=b"postgres (PostgreSQL) 12.1")) def test_pg_wal_doesnt_exist(self, mock_out, mock_err): binaries.extend([os.path.join(config["postgresql"]["bin_dir"], i) for i in required_binaries]) directories.append(config["postgresql"]["data_dir"]) directories.append(config["postgresql"]["bin_dir"]) files.append(os.path.join(config["postgresql"]["data_dir"], "global", "pg_control")) files.append(os.path.join(config["postgresql"]["data_dir"], "PG_VERSION")) c = copy.deepcopy(config) with patch('patroni.validator.open', mock_open(read_data='11')): errors = schema(c) output = "\n".join(errors) self.assertEqual(['postgresql.data_dir', 'raft.bind_addr', 'raft.self_addr'], parse_output(output)) def test_data_dir_is_empty_string(self, mock_out, mock_err): binaries.extend(required_binaries) directories.append(config["postgresql"]["data_dir"]) directories.append(config["postgresql"]["bin_dir"]) c = copy.deepcopy(config) c["kubernetes"] = False c["postgresql"]["pg_hba"] = "" c["postgresql"]["data_dir"] = "" c["postgresql"]["bin_dir"] = "" errors = schema(c) output = "\n".join(errors) self.assertEqual(['kubernetes', 'postgresql.data_dir', 'postgresql.pg_hba', 'raft.bind_addr', 'raft.self_addr'], parse_output(output)) def test_directory_contains(self, mock_out, mock_err): directories.extend([config_2["some_dir"], os.path.join(config_2["some_dir"], "very_interesting_subdir")]) errors = schema2(config_2) output = "\n".join(errors) self.assertEqual(['some_dir'], parse_output(output)) def test_validate_binary_name(self, mock_out, mock_err): r = copy.copy(required_binaries) r.remove('postgres') r.append('fake-postgres') binaries.extend(r) c = copy.deepcopy(config) 
c["postgresql"]["bin_name"] = {"postgres": "fake-postgres"} del c["postgresql"]["bin_dir"] errors = schema(c) output = "\n".join(errors) self.assertEqual(['raft.bind_addr', 'raft.self_addr'], parse_output(output)) def test_validate_binary_name_missing(self, mock_out, mock_err): r = copy.copy(required_binaries) r.remove('postgres') binaries.extend(r) c = copy.deepcopy(config) c["postgresql"]["bin_name"] = {"postgres": "fake-postgres"} del c["postgresql"]["bin_dir"] errors = schema(c) output = "\n".join(errors) self.assertEqual(['postgresql.bin_dir', 'postgresql.bin_name.postgres', 'raft.bind_addr', 'raft.self_addr'], parse_output(output)) def test_validate_binary_name_empty_string(self, mock_out, mock_err): r = copy.copy(required_binaries) binaries.extend(r) c = copy.deepcopy(config) c["postgresql"]["bin_name"] = {"postgres": ""} del c["postgresql"]["bin_dir"] errors = schema(c) output = "\n".join(errors) self.assertEqual(['postgresql.bin_dir', 'postgresql.bin_name.postgres', 'raft.bind_addr', 'raft.self_addr'], parse_output(output)) def test_one_of(self, _, __): c = copy.deepcopy(config) # Providing neither is fine del c["tags"]["nofailover"] errors = schema(c) self.assertNotIn("tags Multiple of ('nofailover', 'failover_priority') provided", errors) # Just nofailover is fine c["tags"]["nofailover"] = False errors = schema(c) self.assertNotIn("tags Multiple of ('nofailover', 'failover_priority') provided", errors) # Just failover_priority is fine del c["tags"]["nofailover"] c["tags"]["failover_priority"] = 1 errors = schema(c) self.assertNotIn("tags Multiple of ('nofailover', 'failover_priority') provided", errors) # Providing both is not fine c["tags"]["nofailover"] = False errors = schema(c) self.assertIn("tags Multiple of ('nofailover', 'failover_priority') provided", errors) def test_failover_priority_int(self, *args): c = copy.deepcopy(config) del c["tags"]["nofailover"] c["tags"]["failover_priority"] = 'a string' errors = schema(c) 
self.assertIn('tags.failover_priority a string is not an integer', errors) c = copy.deepcopy(config) del c["tags"]["nofailover"] c["tags"]["failover_priority"] = -6 errors = schema(c) self.assertIn('tags.failover_priority -6 didn\'t pass validation: Wrong value', errors) patroni-3.2.2/tests/test_wale_restore.py000066400000000000000000000152071455170150700204520ustar00rootroot00000000000000import subprocess import unittest import patroni.psycopg as psycopg from mock import Mock, PropertyMock, patch, mock_open from patroni.scripts import wale_restore from patroni.scripts.wale_restore import WALERestore, main as _main, get_major_version from threading import current_thread from . import MockConnect, psycopg_connect wale_output_header = ( b'name\tlast_modified\t' b'expanded_size_bytes\t' b'wal_segment_backup_start\twal_segment_offset_backup_start\t' b'wal_segment_backup_stop\twal_segment_offset_backup_stop\n' ) wale_output_values = ( b'base_00000001000000000000007F_00000040\t2015-05-18T10:13:25.000Z\t' b'167772160\t' b'00000001000000000000007F\t00000040\t' b'00000001000000000000007F\t00000240\n' ) wale_output = wale_output_header + wale_output_values wale_restore.RETRY_SLEEP_INTERVAL = 0.001 # Speed up retries WALE_TEST_RETRIES = 2 @patch('os.access', Mock(return_value=True)) @patch('os.makedirs', Mock(return_value=True)) @patch('os.path.exists', Mock(return_value=True)) @patch('os.path.isdir', Mock(return_value=True)) @patch('patroni.psycopg.connect', psycopg_connect) @patch('subprocess.check_output', Mock(return_value=wale_output)) class TestWALERestore(unittest.TestCase): def setUp(self): self.wale_restore = WALERestore('batman', '/data', 'host=batman port=5432 user=batman', '/etc', 100, 100, 1, 0, WALE_TEST_RETRIES) def test_should_use_s3_to_create_replica(self): self.__thread_ident = current_thread().ident sleeps = [0] def mock_sleep(*args): if current_thread().ident == self.__thread_ident: sleeps[0] += 1 
self.assertTrue(self.wale_restore.should_use_s3_to_create_replica()) with patch.object(MockConnect, 'server_version', PropertyMock(return_value=100000)): self.assertTrue(self.wale_restore.should_use_s3_to_create_replica()) with patch('subprocess.check_output', Mock(return_value=wale_output.replace(b'167772160', b'1'))): self.assertFalse(self.wale_restore.should_use_s3_to_create_replica()) with patch('patroni.psycopg.connect', Mock(side_effect=psycopg.Error("foo"))): save_no_leader = self.wale_restore.no_leader save_leader_connection = self.wale_restore.leader_connection self.assertFalse(self.wale_restore.should_use_s3_to_create_replica()) with patch('time.sleep', mock_sleep): self.wale_restore.no_leader = 1 self.assertTrue(self.wale_restore.should_use_s3_to_create_replica()) # verify retries self.assertEqual(sleeps[0], WALE_TEST_RETRIES) self.wale_restore.leader_connection = '' self.assertTrue(self.wale_restore.should_use_s3_to_create_replica()) self.wale_restore.no_leader = save_no_leader self.wale_restore.leader_connection = save_leader_connection with patch('subprocess.check_output', Mock(side_effect=subprocess.CalledProcessError(1, "cmd", "foo"))): self.assertFalse(self.wale_restore.should_use_s3_to_create_replica()) with patch('subprocess.check_output', Mock(return_value=wale_output_header)): self.assertFalse(self.wale_restore.should_use_s3_to_create_replica()) with patch('subprocess.check_output', Mock(return_value=wale_output + wale_output_values)): self.assertFalse(self.wale_restore.should_use_s3_to_create_replica()) with patch('subprocess.check_output', Mock(return_value=wale_output.replace(b'expanded_size_bytes', b'expanded_size_foo'))): self.assertFalse(self.wale_restore.should_use_s3_to_create_replica()) def test_create_replica_with_s3(self): with patch('subprocess.call', Mock(return_value=0)): self.assertEqual(self.wale_restore.create_replica_with_s3(), 0) with patch.object(self.wale_restore, 'fix_subdirectory_path_if_broken', 
Mock(return_value=False)): self.assertEqual(self.wale_restore.create_replica_with_s3(), 2) with patch('subprocess.call', Mock(side_effect=Exception("foo"))): self.assertEqual(self.wale_restore.create_replica_with_s3(), 1) def test_run(self): self.wale_restore.init_error = True self.assertEqual(self.wale_restore.run(), 2) # this would do 2 retries 1 sec each self.wale_restore.init_error = False with patch.object(self.wale_restore, 'should_use_s3_to_create_replica', Mock(return_value=True)): with patch.object(self.wale_restore, 'create_replica_with_s3', Mock(return_value=0)): self.assertEqual(self.wale_restore.run(), 0) with patch.object(self.wale_restore, 'should_use_s3_to_create_replica', Mock(return_value=False)): self.assertEqual(self.wale_restore.run(), 2) with patch.object(self.wale_restore, 'should_use_s3_to_create_replica', Mock(return_value=None)): self.assertEqual(self.wale_restore.run(), 1) with patch.object(self.wale_restore, 'should_use_s3_to_create_replica', Mock(side_effect=Exception)): self.assertEqual(self.wale_restore.run(), 2) @patch('sys.exit', Mock()) def test_main(self): self.__thread_ident = current_thread().ident sleeps = [0] def mock_sleep(*args): if current_thread().ident == self.__thread_ident: sleeps[0] += 1 with patch.object(WALERestore, 'run', Mock(return_value=0)): self.assertEqual(_main(), 0) with patch.object(WALERestore, 'run', Mock(return_value=1)), \ patch('time.sleep', mock_sleep): self.assertEqual(_main(), 1) self.assertEqual(sleeps[0], WALE_TEST_RETRIES) @patch('os.path.isfile', Mock(return_value=True)) def test_get_major_version(self): with patch('builtins.open', mock_open(read_data='9.4')): self.assertEqual(get_major_version("data"), 9.4) with patch('builtins.open', side_effect=OSError): self.assertEqual(get_major_version("data"), 0.0) @patch('os.path.islink', Mock(return_value=True)) @patch('os.readlink', Mock(return_value="foo")) @patch('os.remove', Mock()) @patch('os.mkdir', Mock()) def 
test_fix_subdirectory_path_if_broken(self): with patch('os.path.exists', Mock(return_value=False)): # overriding the class-wide mock self.assertTrue(self.wale_restore.fix_subdirectory_path_if_broken("data1")) for fn in ('os.remove', 'os.mkdir'): with patch(fn, side_effect=OSError): self.assertFalse(self.wale_restore.fix_subdirectory_path_if_broken("data3")) patroni-3.2.2/tests/test_watchdog.py000066400000000000000000000216471455170150700175640ustar00rootroot00000000000000import ctypes import patroni.watchdog.linux as linuxwd import sys import unittest import os from mock import patch, Mock, PropertyMock from patroni.watchdog import Watchdog, WatchdogError from patroni.watchdog.base import NullWatchdog from patroni.watchdog.linux import LinuxWatchdogDevice class MockDevice(object): def __init__(self, fd, filename, flag): self.fd = fd self.filename = filename self.flag = flag self.timeout = 60 self.open = True self.writes = [] mock_devices = [None] def mock_open(filename, flag): fd = len(mock_devices) mock_devices.append(MockDevice(fd, filename, flag)) return fd def mock_ioctl(fd, op, arg=None, mutate_flag=False): assert 0 < fd < len(mock_devices) dev = mock_devices[fd] sys.stderr.write("Ioctl %d %d %r\n" % (fd, op, arg)) if op == linuxwd.WDIOC_GETSUPPORT: sys.stderr.write("Get support\n") assert (mutate_flag is True) arg.options = sum(map(linuxwd.WDIOF.get, ['SETTIMEOUT', 'KEEPALIVEPING'])) arg.identity = (ctypes.c_ubyte * 32)(*map(ord, 'Mock Watchdog')) elif op == linuxwd.WDIOC_GETTIMEOUT: arg.value = dev.timeout elif op == linuxwd.WDIOC_SETTIMEOUT: sys.stderr.write("Set timeout called with %s\n" % arg.value) assert 0 < arg.value < 65535 dev.timeout = arg.value - 1 else: raise Exception("Unknown op %d", op) return 0 def mock_write(fd, string): assert 0 < fd < len(mock_devices) assert len(string) == 1 assert mock_devices[fd].open mock_devices[fd].writes.append(string) def mock_close(fd): assert 0 < fd < len(mock_devices) assert mock_devices[fd].open 
mock_devices[fd].open = False @unittest.skipIf(os.name == 'nt', "Windows not supported") @patch('os.open', mock_open) @patch('os.write', mock_write) @patch('os.close', mock_close) @patch('fcntl.ioctl', mock_ioctl) class TestWatchdog(unittest.TestCase): def setUp(self): mock_devices[:] = [None] @patch('platform.system', Mock(return_value='Linux')) @patch.object(LinuxWatchdogDevice, 'can_be_disabled', PropertyMock(return_value=True)) def test_unsafe_timeout_disable_watchdog_and_exit(self): watchdog = Watchdog({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'required', 'safety_margin': -1}}) self.assertEqual(watchdog.activate(), False) self.assertEqual(watchdog.is_running, False) @patch('platform.system', Mock(return_value='Linux')) @patch.object(LinuxWatchdogDevice, 'get_timeout', Mock(return_value=16)) def test_timeout_does_not_ensure_safe_termination(self): Watchdog({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'auto', 'safety_margin': -1}}).activate() self.assertEqual(len(mock_devices), 2) @patch('platform.system', Mock(return_value='Linux')) @patch.object(Watchdog, 'is_running', PropertyMock(return_value=False)) def test_watchdog_not_activated(self): self.assertFalse(Watchdog({'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'required'}}).activate()) @patch('platform.system', Mock(return_value='Linux')) @patch.object(LinuxWatchdogDevice, 'is_running', PropertyMock(return_value=False)) def test_watchdog_activate(self): with patch.object(LinuxWatchdogDevice, 'open', Mock(side_effect=WatchdogError(''))): self.assertTrue(Watchdog({'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'auto'}}).activate()) self.assertFalse(Watchdog({'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'required'}}).activate()) @patch('platform.system', Mock(return_value='Linux')) def test_basic_operation(self): watchdog = Watchdog({'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'required'}}) watchdog.activate() self.assertEqual(len(mock_devices), 2) device = mock_devices[-1] 
self.assertTrue(device.open) self.assertEqual(device.timeout, 24) watchdog.keepalive() self.assertEqual(len(device.writes), 1) watchdog.impl._fd, fd = None, watchdog.impl._fd watchdog.keepalive() self.assertEqual(len(device.writes), 1) watchdog.impl._fd = fd watchdog.disable() self.assertFalse(device.open) self.assertEqual(device.writes[-1], b'V') def test_invalid_timings(self): watchdog = Watchdog({'ttl': 30, 'loop_wait': 20, 'watchdog': {'mode': 'automatic', 'safety_margin': -1}}) watchdog.activate() self.assertEqual(len(mock_devices), 1) self.assertFalse(watchdog.is_running) def test_parse_mode(self): with patch('patroni.watchdog.base.logger.warning', new_callable=Mock()) as warning_mock: watchdog = Watchdog({'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'bad'}}) self.assertEqual(watchdog.config.mode, 'off') warning_mock.assert_called_once() @patch('platform.system', Mock(return_value='Unknown')) def test_unsupported_platform(self): self.assertRaises(SystemExit, Watchdog, {'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'required', 'driver': 'bad'}}) def test_exceptions(self): wd = Watchdog({'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'bad'}}) wd.impl.close = wd.impl.keepalive = Mock(side_effect=WatchdogError('')) self.assertTrue(wd.activate()) self.assertIsNone(wd.keepalive()) self.assertIsNone(wd.disable()) @patch('platform.system', Mock(return_value='Linux')) def test_config_reload(self): watchdog = Watchdog({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'required'}}) self.assertTrue(watchdog.activate()) self.assertTrue(watchdog.is_running) watchdog.reload_config({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'off'}}) self.assertFalse(watchdog.is_running) watchdog.reload_config({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'required'}}) self.assertFalse(watchdog.is_running) watchdog.keepalive() self.assertTrue(watchdog.is_running) watchdog.disable() watchdog.reload_config({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'required', 
'driver': 'unknown'}}) self.assertFalse(watchdog.is_healthy) self.assertFalse(watchdog.activate()) watchdog.reload_config({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'required'}}) self.assertFalse(watchdog.is_running) watchdog.keepalive() self.assertTrue(watchdog.is_running) watchdog.reload_config({'ttl': 60, 'loop_wait': 15, 'watchdog': {'mode': 'required'}}) watchdog.keepalive() self.assertTrue(watchdog.is_running) self.assertEqual(watchdog.config.timeout, 60 - 5) watchdog.reload_config({'ttl': 60, 'loop_wait': 15, 'watchdog': {'mode': 'required', 'safety_margin': -1}}) watchdog.keepalive() self.assertTrue(watchdog.is_running) self.assertEqual(watchdog.config.timeout, 60 // 2) class TestNullWatchdog(unittest.TestCase): def test_basics(self): watchdog = NullWatchdog() self.assertTrue(watchdog.can_be_disabled) self.assertRaises(WatchdogError, watchdog.set_timeout, 1) self.assertEqual(watchdog.describe(), 'NullWatchdog') self.assertIsInstance(NullWatchdog.from_config({}), NullWatchdog) @unittest.skipIf(os.name == 'nt', "Windows not supported") class TestLinuxWatchdogDevice(unittest.TestCase): def setUp(self): self.impl = LinuxWatchdogDevice.from_config({}) @patch('os.open', Mock(return_value=3)) @patch('os.write', Mock(side_effect=OSError)) @patch('fcntl.ioctl', Mock(return_value=0)) def test_basics(self): self.impl.open() try: if self.impl.get_support().has_foo: self.assertFail() except Exception as e: self.assertTrue(isinstance(e, AttributeError)) self.assertRaises(WatchdogError, self.impl.close) self.assertRaises(WatchdogError, self.impl.keepalive) self.assertRaises(WatchdogError, self.impl.set_timeout, -1) @patch('os.open', Mock(return_value=3)) @patch('fcntl.ioctl', Mock(side_effect=OSError)) def test__ioctl(self): self.assertRaises(WatchdogError, self.impl.get_support) self.impl.open() self.assertRaises(WatchdogError, self.impl.get_support) def test_is_healthy(self): self.assertFalse(self.impl.is_healthy) @patch('os.open', Mock(return_value=3)) 
@patch('fcntl.ioctl', Mock(side_effect=OSError)) def test_error_handling(self): self.impl.open() self.assertRaises(WatchdogError, self.impl.get_timeout) self.assertRaises(WatchdogError, self.impl.set_timeout, 10) # We still try to output a reasonable string even if getting info errors self.assertEqual(self.impl.describe(), "Linux watchdog device") @patch('os.open', Mock(side_effect=OSError)) def test_open(self): self.assertRaises(WatchdogError, self.impl.open) patroni-3.2.2/tests/test_zookeeper.py000066400000000000000000000275421455170150700177670ustar00rootroot00000000000000import select import unittest from kazoo.client import KazooClient from kazoo.exceptions import NoNodeError, NodeExistsError from kazoo.handlers.threading import SequentialThreadingHandler from kazoo.protocol.states import KeeperState, WatchedEvent, ZnodeStat from kazoo.retry import RetryFailedError from mock import Mock, PropertyMock, patch from patroni.dcs.zookeeper import Cluster, PatroniKazooClient, \ PatroniSequentialThreadingHandler, ZooKeeper, ZooKeeperError class MockKazooClient(Mock): handler = PatroniSequentialThreadingHandler(10) leader = False exists = True def __init__(self, *args, **kwargs): super(MockKazooClient, self).__init__() self._session_timeout = 30000 @property def client_id(self): return (-1, '') @staticmethod def retry(func, *args, **kwargs): return func(*args, **kwargs) def get(self, path, watch=None): if not isinstance(path, str): raise TypeError("Invalid type for 'path' (string expected)") if path == '/broken/status': return (b'{', ZnodeStat(0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0)) elif path in ('/no_node', '/legacy/status'): raise NoNodeError elif '/members/' in path: return ( b'postgres://repuser:rep-pass@localhost:5434/postgres?application_name=http://127.0.0.1:8009/patroni', ZnodeStat(0, 0, 0, 0, 0, 0, 0, 0 if self.exists else -1, 0, 0, 0) ) elif path.endswith('/optime/leader'): return (b'500', ZnodeStat(0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0)) elif path.endswith('/leader'): 
if self.leader: return (b'foo', ZnodeStat(0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0)) return (b'foo', ZnodeStat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) elif path.endswith('/initialize'): return (b'foo', ZnodeStat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) elif path.endswith('/status'): return (b'{"optime":500,"slots":{"ls":1234567}}', ZnodeStat(0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0)) elif path.endswith('/failsafe'): return (b'{a}', ZnodeStat(0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0)) return (b'', ZnodeStat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) @staticmethod def get_children(path, watch=None, include_data=False): if not isinstance(path, str): raise TypeError("Invalid type for 'path' (string expected)") if path.startswith('/no_node'): raise NoNodeError elif path in ['/service/bla/', '/service/test/']: return ['initialize', 'leader', 'members', 'optime', 'failover', 'sync', 'failsafe', '0', '1'] return ['foo', 'bar', 'buzz'] def create(self, path, value=b"", acl=None, ephemeral=False, sequence=False, makepath=False): if not isinstance(path, str): raise TypeError("Invalid type for 'path' (string expected)") if not isinstance(value, bytes): raise TypeError("Invalid type for 'value' (must be a byte string)") if b'Exception' in value: raise Exception if path.endswith('/initialize') or path == '/service/test/optime/leader': raise Exception elif b'retry' in value or (b'exists' in value and self.exists): raise NodeExistsError def create_async(self, path, value=b"", acl=None, ephemeral=False, sequence=False, makepath=False): return self.create(path, value, acl, ephemeral, sequence, makepath) or Mock() @staticmethod def set(path, value, version=-1): if not isinstance(path, str): raise TypeError("Invalid type for 'path' (string expected)") if not isinstance(value, bytes): raise TypeError("Invalid type for 'value' (must be a byte string)") if path == '/service/bla/optime/leader': raise Exception if path == '/service/test/members/bar' and b'retry' in value: return if path in ('/service/test/failover', 
'/service/test/config', '/service/test/sync'): if b'Exception' in value: raise Exception elif value == b'ok': return raise NoNodeError def set_async(self, path, value, version=-1): return self.set(path, value, version) or Mock() def delete(self, path, version=-1, recursive=False): if not isinstance(path, str): raise TypeError("Invalid type for 'path' (string expected)") self.exists = False if path == '/service/test/leader': self.leader = True raise Exception elif path == '/service/test/members/buzz': raise Exception elif path.endswith('/') or path.endswith('/initialize') or path == '/service/test/members/bar': raise NoNodeError def delete_async(self, path, version=-1, recursive=False): return self.delete(path, version, recursive) or Mock() class TestPatroniSequentialThreadingHandler(unittest.TestCase): def setUp(self): self.handler = PatroniSequentialThreadingHandler(10) @patch.object(SequentialThreadingHandler, 'create_connection', Mock()) def test_create_connection(self): self.assertIsNotNone(self.handler.create_connection(())) self.assertIsNotNone(self.handler.create_connection((), 40)) self.assertIsNotNone(self.handler.create_connection(timeout=40)) def test_select(self): with patch.object(SequentialThreadingHandler, 'select', Mock(side_effect=ValueError)): self.assertRaises(select.error, self.handler.select) with patch.object(SequentialThreadingHandler, 'select', Mock(side_effect=IOError)): self.assertRaises(Exception, self.handler.select) class TestPatroniKazooClient(unittest.TestCase): def test__call(self): c = PatroniKazooClient() with patch.object(KazooClient, '_call', Mock()): self.assertIsNotNone(c._call(None, Mock())) c._state = KeeperState.CONNECTING self.assertFalse(c._call(None, Mock())) class TestZooKeeper(unittest.TestCase): @patch('patroni.dcs.zookeeper.PatroniKazooClient', MockKazooClient) def setUp(self): self.zk = ZooKeeper({'hosts': ['localhost:2181'], 'scope': 'test', 'name': 'foo', 'ttl': 30, 'retry_timeout': 10, 'loop_wait': 10, 'set_acls': 
{'CN=principal2': ['ALL']}}) def test_reload_config(self): self.zk.reload_config({'ttl': 20, 'retry_timeout': 10, 'loop_wait': 10}) self.zk.reload_config({'ttl': 20, 'retry_timeout': 10, 'loop_wait': 5}) def test_get_node(self): self.assertIsNone(self.zk.get_node('/no_node')) def test_get_children(self): self.assertListEqual(self.zk.get_children('/no_node'), []) def test__cluster_loader(self): self.zk._base_path = self.zk._base_path.replace('test', 'bla') self.zk._cluster_loader(self.zk.client_path('')) self.zk._base_path = self.zk._base_path = '/broken' self.zk._cluster_loader(self.zk.client_path('')) self.zk._base_path = self.zk._base_path = '/legacy' self.zk._cluster_loader(self.zk.client_path('')) self.zk._base_path = self.zk._base_path = '/no_node' self.zk._cluster_loader(self.zk.client_path('')) def test_get_cluster(self): cluster = self.zk.get_cluster() self.assertEqual(cluster.last_lsn, 500) def test__get_citus_cluster(self): self.zk._citus_group = '0' for _ in range(0, 2): cluster = self.zk.get_cluster() self.assertIsInstance(cluster, Cluster) self.assertIsInstance(cluster.workers[1], Cluster) @patch('patroni.dcs.zookeeper.logger.error') @patch.object(ZooKeeper, '_cluster_loader', Mock(side_effect=Exception)) def test_get_citus_coordinator(self, mock_logger): self.assertIsNone(self.zk.get_citus_coordinator()) mock_logger.assert_called_once() def test_delete_leader(self): self.assertTrue(self.zk.delete_leader(self.zk.get_cluster().leader)) def test_set_failover_value(self): self.zk.set_failover_value('') self.zk.set_failover_value('ok') self.zk.set_failover_value('Exception') def test_set_config_value(self): self.zk.set_config_value('', 1) self.zk.set_config_value('ok') self.zk.set_config_value('Exception') def test_initialize(self): self.assertFalse(self.zk.initialize()) def test_cancel_initialization(self): self.zk.cancel_initialization() with patch.object(MockKazooClient, 'delete', Mock()): self.zk.cancel_initialization() def test_touch_member(self): 
self.zk._name = 'buzz' self.zk.get_cluster() self.zk.touch_member({'new': 'new'}) self.zk._name = 'bar' self.zk.touch_member({'new': 'new'}) self.zk._name = 'na' self.zk._client.exists = 1 self.zk.touch_member({'Exception': 'Exception'}) self.zk._name = 'bar' self.zk.touch_member({'retry': 'retry'}) self.zk._fetch_cluster = True self.zk.get_cluster() self.zk.touch_member({'retry': 'retry'}) self.zk.touch_member({'conn_url': 'postgres://repuser:rep-pass@localhost:5434/postgres', 'api_url': 'http://127.0.0.1:8009/patroni'}) @patch.object(MockKazooClient, 'create', Mock(side_effect=[RetryFailedError, Exception])) def test_attempt_to_acquire_leader(self): self.assertRaises(ZooKeeperError, self.zk.attempt_to_acquire_leader) self.assertFalse(self.zk.attempt_to_acquire_leader()) def test_take_leader(self): self.zk.take_leader() with patch.object(MockKazooClient, 'create', Mock(side_effect=Exception)): self.zk.take_leader() def test_update_leader(self): leader = self.zk.get_cluster().leader self.assertFalse(self.zk.update_leader(leader, 12345)) with patch.object(MockKazooClient, 'delete', Mock(side_effect=RetryFailedError)): self.assertRaises(ZooKeeperError, self.zk.update_leader, leader, 12345) with patch.object(MockKazooClient, 'delete', Mock(side_effect=NoNodeError)): self.assertTrue(self.zk.update_leader(leader, 12345, failsafe={'foo': 'bar'})) with patch.object(MockKazooClient, 'create', Mock(side_effect=[RetryFailedError, Exception])): self.assertRaises(ZooKeeperError, self.zk.update_leader, leader, 12345) self.assertFalse(self.zk.update_leader(leader, 12345)) @patch.object(Cluster, 'min_version', PropertyMock(return_value=(2, 0))) def test_write_leader_optime(self): self.zk.last_lsn = '0' self.zk.write_leader_optime('1') with patch.object(MockKazooClient, 'create_async', Mock()): self.zk.write_leader_optime('1') with patch.object(MockKazooClient, 'set_async', Mock()): self.zk.write_leader_optime('2') self.zk._base_path = self.zk._base_path.replace('test', 'bla') 
self.zk.get_cluster() self.zk.write_leader_optime('3') def test_delete_cluster(self): self.assertTrue(self.zk.delete_cluster()) def test_watch(self): self.zk.event.wait = Mock() self.zk.watch(None, 0) self.zk.event.is_set = Mock(return_value=True) self.zk._fetch_status = False self.zk.watch(None, 0) def test__kazoo_connect(self): self.zk._client._retry.deadline = 1 self.zk._orig_kazoo_connect = Mock(return_value=(0, 0)) self.zk._kazoo_connect(None, None) def test_sync_state(self): self.zk.set_sync_state_value('') self.zk.set_sync_state_value('ok') self.zk.set_sync_state_value('Exception') self.zk.delete_sync_state() def test_set_history_value(self): self.zk.set_history_value('{}') def test_watcher(self): self.zk._watcher(WatchedEvent('', '', '')) self.assertTrue(self.zk.watch(1, 1)) patroni-3.2.2/tox.ini000066400000000000000000000132521455170150700145150ustar00rootroot00000000000000[common] python_matrix = {36,37,38,39,310,311} postgres_matrix = pg11: PG_MAJOR = 11 pg12: PG_MAJOR = 12 pg13: PG_MAJOR = 13 pg14: PG_MAJOR = 14 pg15: PG_MAJOR = 15 pg16: PG_MAJOR = 16 psycopg_deps = py{37,38,39,310,311}-{lin,win}: psycopg[binary] mac: psycopg2-binary py36: psycopg2-binary platforms = lin: linux mac: darwin win: win32 [tox] min_version = 4.0 requires = tox>4 env_list = dep lint py{[common]python_matrix}-test-{lin,mac,win} docs skipsdist = True toxworkdir = {env:TOX_WORK_DIR:.tox} skip_missing_interpreters = True [testenv] setenv = PYTHONDONTWRITEBYTECODE = 1 mac: OPEN_CMD = {env:OPEN_CMD:open} lin: OPEN_CMD = {env:OPEN_CMD:xdg-open} passenv = BROWSER DISPLAY [testenv:lint] description = Lint code with flake8 commands = flake8 {posargs:patroni tests setup.py} deps = flake8 [testenv:py{36,37,38,39,310,311}-test-{lin,win,mac}] description = Run unit tests with pytest labels = test commands_pre = - {tty:rm -f "{toxworkdir}{/}cov_report_{env_name}_html{/}index.html":true} - {tty:rm -f "{toxworkdir}{/}pytest_report_{env_name}.html":true} commands = pytest \ -p no:cacheprovider 
\ --verbose \ --doctest-modules \ --capture=fd \ --cov=patroni \ --cov-report=term-missing \ --cov-append \ {tty::--cov-report="xml\:{toxworkdir}{/}cov_report.{env_name}.xml"} \ {tty:--cov-report="html\:{toxworkdir}{/}cov_report_{env_name}_html":} \ {tty:--html="{toxworkdir}{/}pytest_report_{env_name}.html":} \ {posargs:tests patroni} commands_post = - {tty:{env:OPEN_CMD} "{toxworkdir}{/}cov_report_{env_name}_html{/}index.html":true} - {tty:{env:OPEN_CMD} "{toxworkdir}{/}pytest_report_{env_name}.html":true} deps = -r requirements.txt mock>=2.0.0 pytest pytest-cov pytest-html {[common]psycopg_deps} platform = {[common]platforms} allowlist_externals = rm true {env:OPEN_CMD} [testenv:dep] description = Check package dependency problems commands = pipdeptree -w fail deps = -r requirements.txt pipdeptree {[common]psycopg_deps} [testenv:py{37,38,39,310,311}-type-{lin,mac,win}] description = Run static type checking with pyright labels = type deps = -r requirements.txt pyright psycopg2-binary psycopg[binary] commands = pyright --venv-path {toxworkdir}{/}{envname} {posargs:patroni} platform = {[common]platforms} [testenv:black] description = Reformat code with black deps = black commands = black {posargs:patroni tests} [testenv:pg{12,13,14,15,16}-docker-build] description = Build docker containers needed for testing labels = behave docker-build setenv = {[common]postgres_matrix} DOCKER_BUILDKIT = 1 passenv = BASE_IMAGE commands = docker build . 
\ --tag patroni-dev:{env:PG_MAJOR} \ --build-arg PG_MAJOR \ --build-arg BASE_IMAGE={env:BASE_IMAGE:postgres} \ --file features/Dockerfile allowlist_externals = docker [testenv:pg{12,13,14,15,16}-docker-behave-{etcd,etcd3}-{lin,mac}] description = Run behaviour tests in patroni-dev docker container setenv = etcd: DCS=etcd etcd3: DCS=etcd3 {[common]postgres_matrix} CONTAINER_NAME = tox-{env_name}-{env:PYTHONHASHSEED} labels = behave depends = pg{11,12,13,14,15,16}-docker-build # There's a bug which affects calling multiple envs on the command line # This should be a valid command: tox -e 'py{36,37,38,39,310,311}-behave-{env:DCS}-lin' # Replaced with workaround, see https://github.com/tox-dev/tox/issues/2850 commands = docker run \ --volume {tox_root}:/src \ --env DCS={env:DCS} \ --hostname {env:CONTAINER_NAME} \ --name {env:CONTAINER_NAME} \ --rm \ --tty \ {env:PATRONI_DEV_IMAGE:patroni-dev:{env:PG_MAJOR}} \ tox run -x 'tox.env_list=py{[common]python_matrix}-behave-{env:DCS}-lin' \ -- {posargs} allowlist_externals = docker find platform = lin: linux ; win: win32 mac: darwin [testenv:py{36,38,39,310,311}-behave-{etcd,etcd3}-{lin,win,mac}] description = Run behaviour tests (locally with tox) deps = -r requirements.txt behave coverage {[common]psycopg_deps} setenv = etcd: DCS = {env:DCS:etcd} etcd3: DCS = {env:DCS:etcd3} passenv = ETCD_UNSUPPORTED_ARCH commands = python3 -m behave --format json --format plain --outfile result.json {posargs} mv result.json features/output allowlist_externals = mv platform = {[common]platforms} [testenv:docs-{lin,mac,win}] description = Build Sphinx documentation in HTML format labels: docs deps = -r requirements.docs.txt -r requirements.txt psycopg[binary] psycopg2-binary commands = sphinx-build \ -d "{envtmpdir}{/}doctree" docs "{toxworkdir}{/}docs_out" \ --color -b html \ -T -E -W --keep-going \ {posargs} commands_post = - {tty:{env:OPEN_CMD} "{toxworkdir}{/}docs_out{/}index.html":true:} allowlist_externals = true {env:OPEN_CMD} 
platform = {[common]platforms} [testenv:pdf-{lin,mac,win}] description = Build Sphinx documentation in PDF format labels: docs deps = -r requirements.docs.txt -r requirements.txt psycopg[binary] psycopg2-binary commands = python -m sphinx -T -E -b latex -d _build/doctrees -D language=en . pdf - latexmk -r pdf/latexmkrc -cd -C pdf/Patroni.tex latexmk -r pdf/latexmkrc -cd -pdf -f -dvi- -ps- -jobname=Patroni -interaction=nonstopmode pdf/Patroni.tex commands_post = - {tty:{env:OPEN_CMD} "pdf{/}Patroni.pdf":true:} allowlist_externals = true latexmk {env:OPEN_CMD} platform = {[common]platforms} change_dir = docs [flake8] max-line-length = 120 ignore = D401,W503 patroni-3.2.2/typings/000077500000000000000000000000001455170150700146745ustar00rootroot00000000000000patroni-3.2.2/typings/botocore/000077500000000000000000000000001455170150700165105ustar00rootroot00000000000000patroni-3.2.2/typings/botocore/__init__.pyi000066400000000000000000000000001455170150700207600ustar00rootroot00000000000000patroni-3.2.2/typings/botocore/exceptions.pyi000066400000000000000000000000421455170150700214100ustar00rootroot00000000000000class ClientError(Exception): ... patroni-3.2.2/typings/botocore/utils.pyi000066400000000000000000000013001455170150700203650ustar00rootroot00000000000000from typing import Any, Callable, Dict, Optional DEFAULT_METADATA_SERVICE_TIMEOUT = 1 METADATA_BASE_URL = 'http://169.254.169.254/' class AWSResponse: status_code: int @property def text(self) -> str: ... class IMDSFetcher: def __init__(self, timeout: float = DEFAULT_METADATA_SERVICE_TIMEOUT, num_attempts: int = 1, base_url: str = METADATA_BASE_URL, env: Optional[Dict[str, str]] = None, user_agent: Optional[str] = None, config: Optional[Dict[str, Any]] = None) -> None: ... def _fetch_metadata_token(self) -> Optional[str]: ...: def _get_request(self, url_path: str, retry_func: Optional[Callable[[AWSResponse], bool]] = None, token: Optional[str] = None) -> AWSResponse: ... 
patroni-3.2.2/typings/cdiff/000077500000000000000000000000001455170150700157475ustar00rootroot00000000000000patroni-3.2.2/typings/cdiff/__init__.pyi000066400000000000000000000002521455170150700202300ustar00rootroot00000000000000import io from typing import Any class PatchStream: def __init__(self, diff_hdl: io.TextIOBase) -> None: ... def markup_to_pager(stream: Any, opts: Any) -> None: ... patroni-3.2.2/typings/consul/000077500000000000000000000000001455170150700161775ustar00rootroot00000000000000patroni-3.2.2/typings/consul/__init__.pyi000066400000000000000000000001461455170150700204620ustar00rootroot00000000000000from consul.base import ConsulException, NotFound __all__ = ['ConsulException', 'Consul', 'NotFound'] patroni-3.2.2/typings/consul/base.pyi000066400000000000000000000034511455170150700176370ustar00rootroot00000000000000from typing import Any, Dict, List, Optional, Tuple class ConsulException(Exception): ... class NotFound(ConsulException): ... class Check: @classmethod def http(klass, url: str, interval: str, timeout: Optional[str] = None, deregister: Optional[str] = None) -> Dict[str, str]: ... class Consul: http: Any agent: 'Consul.Agent' session: 'Consul.Session' kv: 'Consul.KV' class KV: def get(self, key: str, index: Optional[int]=None, recurse: bool = False, wait: Optional[str] = None, token: Optional[str] = None, consistency: Optional[str] = None, keys: bool = False, separator: Optional[str] = '', dc: Optional[str] = None) -> Tuple[int, Dict[str, Any]]: ... def put(self, key: str, value: str, cas: Optional[int] = None, flags: Optional[int] = None, acquire: Optional[str] = None, release: Optional[str] = None, token: Optional[str] = None, dc: Optional[str] = None) -> bool: ... def delete(self, key: str, recurse: Optional[bool] = None, cas: Optional[int] = None, token: Optional[str] = None, dc: Optional[str] = None) -> bool: ... class Agent: service: 'Consul.Agent.Service' def self(self) -> Dict[str, Dict[str, Any]]: ... 
class Service: def register(self, name: str, service_id=..., address=..., port=..., tags=..., check=..., token=..., script=..., interval=..., ttl=..., http=..., timeout=..., enable_tag_override=...) -> bool: ... def deregister(self, service_id: str) -> bool: ... class Session: def create(self, name: Optional[str] = None, node: Optional[str] = [], checks: Optional[List[str]]=None, lock_delay: float = 15, behavior: str = 'release', ttl: Optional[int] = None, dc: Optional[str] = None) -> str: ... def renew(self, session_id: str, dc: Optional[str] = None) -> Optional[str]: ... patroni-3.2.2/typings/dns/000077500000000000000000000000001455170150700154605ustar00rootroot00000000000000patroni-3.2.2/typings/dns/resolver.pyi000066400000000000000000000013571455170150700200520ustar00rootroot00000000000000from typing import Union, Optional, Iterator class Name: def to_text(self, omit_final_dot: bool = ...) -> str: ... class Rdata: target: Name = ... port: int = ... class Answer: def __iter__(self) -> Iterator[Rdata]: ... def resolve(qname : str, rdtype : Union[int,str] = 0, rdclass : Union[int,str] = 0, tcp=False, source=None, raise_on_no_answer=True, source_port=0, lifetime : Optional[float]=None, search : Optional[bool]=None) -> Answer: ... def query(qname : str, rdtype : Union[int,str] = 0, rdclass : Union[int,str] = 0, tcp=False, source: Optional[str] = None, raise_on_no_answer=True, source_port=0, lifetime : Optional[float]=None) -> Answer: ... patroni-3.2.2/typings/etcd/000077500000000000000000000000001455170150700156135ustar00rootroot00000000000000patroni-3.2.2/typings/etcd/__init__.pyi000066400000000000000000000020661455170150700201010ustar00rootroot00000000000000from typing import Dict, Optional, Type, List from .client import Client __all__ = ['Client', 'EtcdError', 'EtcdException', 'EtcdEventIndexCleared', 'EtcdWatcherCleared', 'EtcdKeyNotFound', 'EtcdAlreadyExist', 'EtcdResult', 'EtcdConnectionFailed', 'EtcdWatchTimedOut'] class EtcdResult: action: str = ... 
modifiedIndex: int = ... key: str = ... value: str = ... ttl: Optional[float] = ... @property def leaves(self) -> List['EtcdResult']: ... class EtcdException(Exception): def __init__(self, message=..., payload=...) -> None: ... class EtcdConnectionFailed(EtcdException): def __init__(self, message=..., payload=..., cause=...) -> None: ... class EtcdKeyError(EtcdException): ... class EtcdKeyNotFound(EtcdKeyError): ... class EtcdAlreadyExist(EtcdKeyError): ... class EtcdEventIndexCleared(EtcdException): ... class EtcdWatchTimedOut(EtcdConnectionFailed): ... class EtcdWatcherCleared(EtcdException): ... class EtcdLeaderElectionInProgress(EtcdException): ... class EtcdError: error_exceptions: Dict[int, Type[EtcdException]] = ... patroni-3.2.2/typings/etcd/client.pyi000066400000000000000000000026171455170150700176220ustar00rootroot00000000000000import urllib3 from typing import Any, Optional, Set from . import EtcdResult class Client: _MGET: str _MPUT: str _MPOST: str _MDELETE: str _comparison_conditions: Set[str] _read_options: Set[str] _del_conditions: Set[str] http: urllib3.poolmanager.PoolManager _use_proxies: bool version_prefix: str username: Optional[str] password: Optional[str] def __init__(self, host=..., port=..., srv_domain=..., version_prefix=..., read_timeout=..., allow_redirect=..., protocol=..., cert=..., ca_cert=..., username=..., password=..., allow_reconnect=..., use_proxies=..., expected_cluster_id=..., per_host_pool_size=..., lock_prefix=...): ... @property def protocol(self) -> str: ... @property def read_timeout(self) -> int: ... @property def allow_redirect(self) -> bool: ... def write(self, key: str, value: str, ttl: int = ..., dir: bool = ..., append: bool = ..., **kwdargs: Any) -> EtcdResult: ... def read(self, key: str, **kwdargs: Any) -> EtcdResult: ... def delete(self, key: str, recursive: bool = ..., dir: bool = ..., **kwdargs: Any) -> EtcdResult: ... def set(self, key: str, value: str, ttl: int = ...) -> EtcdResult: ... 
def watch(self, key: str, index: int = ..., timeout: float = ..., recursive: bool = ...) -> EtcdResult: ... def _handle_server_response(self, response: urllib3.response.HTTPResponse) -> Any: ... patroni-3.2.2/typings/kazoo/000077500000000000000000000000001455170150700160175ustar00rootroot00000000000000patroni-3.2.2/typings/kazoo/client.pyi000066400000000000000000000043631455170150700200260ustar00rootroot00000000000000__all__ = ['KazooState', 'KazooClient', 'KazooRetry'] from kazoo.protocol.connection import ConnectionHandler from kazoo.protocol.states import KazooState, WatchedEvent, ZnodeStat from kazoo.handlers.threading import AsyncResult, SequentialThreadingHandler from kazoo.retry import KazooRetry from kazoo.security import ACL from typing import Any, Callable, Optional, Tuple, List class KazooClient: handler: SequentialThreadingHandler _state: str _connection: ConnectionHandler _session_timeout: int retry: Callable[..., Any] _retry: KazooRetry def __init__(self, hosts=..., timeout=..., client_id=..., handler=..., default_acl=..., auth_data=..., sasl_options=..., read_only=..., randomize_hosts=..., connection_retry=..., command_retry=..., logger=..., keyfile=..., keyfile_password=..., certfile=..., ca=..., use_ssl=..., verify_certs=..., **kwargs) -> None: ... @property def client_id(self) -> Optional[Tuple[Any]]: ... def add_listener(self, listener: Callable[[str], None]) -> None: ... def start(self, timeout: int = ...) -> None: ... def restart(self) -> None: ... def set_hosts(self, hosts: str, randomize_hosts: Optional[bool] = None) -> None: ... def create(self, path: str, value: bytes = b'', acl: Optional[ACL]=None, ephemeral: bool = False, sequence: bool = False, makepath: bool = False, include_data: bool = False) -> None: ... def create_async(self, path: str, value: bytes = b'', acl: Optional[ACL]=None, ephemeral: bool = False, sequence: bool = False, makepath: bool = False, include_data: bool = False) -> AsyncResult: ... 
def get(self, path: str, watch: Optional[Callable[[WatchedEvent], None]] = None) -> Tuple[bytes, ZnodeStat]: ... def get_children(self, path: str, watch: Optional[Callable[[WatchedEvent], None]] = None, include_data: bool = False) -> List[str]: ... def set(self, path: str, value: bytes, version: int = -1) -> ZnodeStat: ... def set_async(self, path: str, value: bytes, version: int = -1) -> AsyncResult: ... def delete(self, path: str, version: int = -1, recursive: bool = False) -> None: ... def delete_async(self, path: str, version: int = -1) -> AsyncResult: ... def _call(self, request: Tuple[Any], async_object: AsyncResult) -> Optional[bool]: ... patroni-3.2.2/typings/kazoo/exceptions.pyi000066400000000000000000000004361455170150700207260ustar00rootroot00000000000000class KazooException(Exception): ... class ZookeeperError(KazooException): ... class SessionExpiredError(ZookeeperError): ... class ConnectionClosedError(SessionExpiredError): ... class NoNodeError(ZookeeperError): ... class NodeExistsError(ZookeeperError): ... patroni-3.2.2/typings/kazoo/handlers/000077500000000000000000000000001455170150700176175ustar00rootroot00000000000000patroni-3.2.2/typings/kazoo/handlers/threading.pyi000066400000000000000000000004661455170150700223150ustar00rootroot00000000000000import socket from kazoo.handlers import utils from typing import Any class AsyncResult(utils.AsyncResult): ... class SequentialThreadingHandler: def select(self, *args: Any, **kwargs: Any) -> Any: ... def create_connection(self, *args: Any, **kwargs: Any) -> socket.socket: ... patroni-3.2.2/typings/kazoo/handlers/utils.pyi000066400000000000000000000003271455170150700215040ustar00rootroot00000000000000from typing import Any, Optional class AsyncResult: def set_exception(self, exception: Exception) -> None: ... def get(self, block: bool = False, timeout: Optional[float] = None) -> Any: ... 
patroni-3.2.2/typings/kazoo/protocol/000077500000000000000000000000001455170150700176605ustar00rootroot00000000000000patroni-3.2.2/typings/kazoo/protocol/connection.pyi000066400000000000000000000003061455170150700225410ustar00rootroot00000000000000import socket from typing import Any, Union, Tuple class ConnectionHandler: _socket: socket.socket def _connect(self, *args: Any) -> Tuple[Union[int, float], Union[int, float]]: ... patroni-3.2.2/typings/kazoo/protocol/states.pyi000066400000000000000000000010451455170150700217060ustar00rootroot00000000000000from typing import Any, NamedTuple class KazooState: SUSPENDED: str CONNECTED: str LOST: str class KeeperState: AUTH_FAILED: str CONNECTED: str CONNECTED_RO: str CONNECTING: str CLOSED: str EXPIRED_SESSION: str class WatchedEvent(NamedTuple): type: str state: str path: str class ZnodeStat(NamedTuple): czxid: int mzxid: int ctime: float mtime: float version: int cversion: int aversion: int ephemeralOwner: Any dataLength: int numChildren: int pzxid: int patroni-3.2.2/typings/kazoo/retry.pyi000066400000000000000000000004641455170150700177130ustar00rootroot00000000000000from kazoo.exceptions import KazooException class RetryFailedError(KazooException): ... class KazooRetry: deadline: float def __init__(self, max_tries=..., delay=..., backoff=..., max_jitter=..., max_delay=..., ignore_expire=..., sleep_func=..., deadline=..., interrupt=...) -> None: ... patroni-3.2.2/typings/kazoo/security.pyi000066400000000000000000000004011455170150700204040ustar00rootroot00000000000000from collections import namedtuple class ACL(namedtuple('ACL', 'perms id')): ... def make_acl(scheme: str, credential: str, read: bool = ..., write: bool = ..., create: bool = ..., delete: bool = ..., admin: bool = ..., all: bool = ...) -> ACL: ... 
patroni-3.2.2/typings/prettytable/000077500000000000000000000000001455170150700172335ustar00rootroot00000000000000patroni-3.2.2/typings/prettytable/__init__.pyi000066400000000000000000000007121455170150700215150ustar00rootroot00000000000000from typing import Any, Dict, List FRAME = 1 ALL = 1 class PrettyTable: def __init__(self, *args: str, **kwargs: Any) -> None: ... def _stringify_hrule(self, options: Dict[str, Any], where: str = '') -> str: ... @property def align(self) -> Dict[str, str]: ... @align.setter def align(self, val: str) -> None: ... def add_row(self, row: List[Any]) -> None: ... def __str__(self) -> str: ... def __repr__(self) -> str: ... patroni-3.2.2/typings/psycopg2/000077500000000000000000000000001455170150700164425ustar00rootroot00000000000000patroni-3.2.2/typings/psycopg2/__init__.pyi000066400000000000000000000030641455170150700207270ustar00rootroot00000000000000from collections.abc import Callable from typing import Any, TypeVar, overload # connection and cursor not available at runtime from psycopg2._psycopg import ( BINARY as BINARY, DATETIME as DATETIME, NUMBER as NUMBER, ROWID as ROWID, STRING as STRING, Binary as Binary, DatabaseError as DatabaseError, DataError as DataError, Date as Date, DateFromTicks as DateFromTicks, Error as Error, IntegrityError as IntegrityError, InterfaceError as InterfaceError, InternalError as InternalError, NotSupportedError as NotSupportedError, OperationalError as OperationalError, ProgrammingError as ProgrammingError, Time as Time, TimeFromTicks as TimeFromTicks, Timestamp as Timestamp, TimestampFromTicks as TimestampFromTicks, Warning as Warning, __libpq_version__ as __libpq_version__, apilevel as apilevel, connection as connection, cursor as cursor, paramstyle as paramstyle, threadsafety as threadsafety, ) __version__: str _T_conn = TypeVar("_T_conn", bound=connection) @overload def connect(dsn: str, connection_factory: Callable[..., _T_conn], cursor_factory: None = None, **kwargs: Any) -> _T_conn: ... 
@overload def connect( dsn: str | None = None, *, connection_factory: Callable[..., _T_conn], cursor_factory: None = None, **kwargs: Any ) -> _T_conn: ... @overload def connect( dsn: str | None = None, connection_factory: Callable[..., connection] | None = None, cursor_factory: Callable[..., cursor] | None = None, **kwargs: Any, ) -> connection: ... patroni-3.2.2/typings/psycopg2/_ipaddress.pyi000066400000000000000000000004451455170150700213050ustar00rootroot00000000000000from _typeshed import Incomplete from typing import Any ipaddress: Any def register_ipaddress(conn_or_curs: Incomplete | None = None) -> None: ... def cast_interface(s, cur: Incomplete | None = None): ... def cast_network(s, cur: Incomplete | None = None): ... def adapt_ipaddress(obj): ... patroni-3.2.2/typings/psycopg2/_json.pyi000066400000000000000000000015401455170150700202750ustar00rootroot00000000000000from _typeshed import Incomplete from typing import Any JSON_OID: int JSONARRAY_OID: int JSONB_OID: int JSONBARRAY_OID: int class Json: adapted: Any def __init__(self, adapted, dumps: Incomplete | None = None) -> None: ... def __conform__(self, proto): ... def dumps(self, obj): ... def prepare(self, conn) -> None: ... def getquoted(self): ... def register_json( conn_or_curs: Incomplete | None = None, globally: bool = False, loads: Incomplete | None = None, oid: Incomplete | None = None, array_oid: Incomplete | None = None, name: str = "json", ): ... def register_default_json(conn_or_curs: Incomplete | None = None, globally: bool = False, loads: Incomplete | None = None): ... def register_default_jsonb(conn_or_curs: Incomplete | None = None, globally: bool = False, loads: Incomplete | None = None): ... 
patroni-3.2.2/typings/psycopg2/_psycopg.pyi000066400000000000000000000355771455170150700210310ustar00rootroot00000000000000from collections.abc import Callable, Iterable, Mapping, Sequence from types import TracebackType from typing import Any, TypeVar, overload from typing_extensions import Literal, Self, TypeAlias import psycopg2 import psycopg2.extensions from psycopg2.sql import Composable _Vars: TypeAlias = Sequence[Any] | Mapping[str, Any] | None BINARY: Any BINARYARRAY: Any BOOLEAN: Any BOOLEANARRAY: Any BYTES: Any BYTESARRAY: Any CIDRARRAY: Any DATE: Any DATEARRAY: Any DATETIME: Any DATETIMEARRAY: Any DATETIMETZ: Any DATETIMETZARRAY: Any DECIMAL: Any DECIMALARRAY: Any FLOAT: Any FLOATARRAY: Any INETARRAY: Any INTEGER: Any INTEGERARRAY: Any INTERVAL: Any INTERVALARRAY: Any LONGINTEGER: Any LONGINTEGERARRAY: Any MACADDRARRAY: Any NUMBER: Any PYDATE: Any PYDATEARRAY: Any PYDATETIME: Any PYDATETIMEARRAY: Any PYDATETIMETZ: Any PYDATETIMETZARRAY: Any PYINTERVAL: Any PYINTERVALARRAY: Any PYTIME: Any PYTIMEARRAY: Any REPLICATION_LOGICAL: int REPLICATION_PHYSICAL: int ROWID: Any ROWIDARRAY: Any STRING: Any STRINGARRAY: Any TIME: Any TIMEARRAY: Any UNICODE: Any UNICODEARRAY: Any UNKNOWN: Any adapters: dict[Any, Any] apilevel: str binary_types: dict[Any, Any] encodings: dict[Any, Any] paramstyle: str sqlstate_errors: dict[Any, Any] string_types: dict[Any, Any] threadsafety: int __libpq_version__: int class cursor: arraysize: int binary_types: Any closed: Any connection: Any description: Any itersize: Any lastrowid: Any name: Any pgresult_ptr: Any query: Any row_factory: Any rowcount: int rownumber: int scrollable: bool | None statusmessage: Any string_types: Any typecaster: Any tzinfo_factory: Any withhold: bool def __init__(self, conn: connection, name: str | bytes | None = ...) -> None: ... def callproc(self, procname, parameters=...): ... def cast(self, oid, s): ... def close(self): ... def copy_expert(self, sql: str | bytes | Composable, file, size=...): ... 
def copy_from(self, file, table, sep=..., null=..., size=..., columns=...): ... def copy_to(self, file, table, sep=..., null=..., columns=...): ... def execute(self, query: str | bytes | Composable, vars: _Vars = ...) -> None: ... def executemany(self, query: str | bytes | Composable, vars_list: Iterable[_Vars]) -> None: ... def fetchall(self) -> list[tuple[Any, ...]]: ... def fetchmany(self, size: int | None = ...) -> list[tuple[Any, ...]]: ... def fetchone(self) -> tuple[Any, ...] | None: ... def mogrify(self, *args, **kwargs): ... def nextset(self): ... def scroll(self, value, mode=...): ... def setinputsizes(self, sizes): ... def setoutputsize(self, size, column=...): ... def __enter__(self) -> Self: ... def __exit__( self, type: type[BaseException] | None, value: BaseException | None, traceback: TracebackType | None ) -> None: ... def __iter__(self) -> Self: ... def __next__(self) -> tuple[Any, ...]: ... _Cursor: TypeAlias = cursor class AsIs: adapted: Any def __init__(self, *args, **kwargs) -> None: ... def getquoted(self, *args, **kwargs): ... def __conform__(self, *args, **kwargs): ... class Binary: adapted: Any buffer: Any def __init__(self, *args, **kwargs) -> None: ... def getquoted(self, *args, **kwargs): ... def prepare(self, conn): ... def __conform__(self, *args, **kwargs): ... class Boolean: adapted: Any def __init__(self, *args, **kwargs) -> None: ... def getquoted(self, *args, **kwargs): ... def __conform__(self, *args, **kwargs): ... class Column: display_size: Any internal_size: Any name: Any null_ok: Any precision: Any scale: Any table_column: Any table_oid: Any type_code: Any def __init__(self, *args, **kwargs) -> None: ... def __eq__(self, __other): ... def __ge__(self, __other): ... def __getitem__(self, __index): ... def __getstate__(self): ... def __gt__(self, __other): ... def __le__(self, __other): ... def __len__(self) -> int: ... def __lt__(self, __other): ... def __ne__(self, __other): ... def __setstate__(self, state): ... 
class ConnectionInfo: # Note: the following properties can be None if their corresponding libpq function # returns NULL. They're not annotated as such, because this is very unlikely in # practice---the psycopg2 docs [1] don't even mention this as a possibility! # # - db_name # - user # - password # - host # - port # - options # # (To prove this, one needs to inspect the psycopg2 source code [2], plus the # documentation [3] and source code [4] of the corresponding libpq calls.) # # [1]: https://www.psycopg.org/docs/extensions.html#psycopg2.extensions.ConnectionInfo # [2]: https://github.com/psycopg/psycopg2/blob/1d3a89a0bba621dc1cc9b32db6d241bd2da85ad1/psycopg/conninfo_type.c#L52 and below # [3]: https://www.postgresql.org/docs/current/libpq-status.html # [4]: https://github.com/postgres/postgres/blob/b39838889e76274b107935fa8e8951baf0e8b31b/src/interfaces/libpq/fe-connect.c#L6754 and below @property def backend_pid(self) -> int: ... @property def dbname(self) -> str: ... @property def dsn_parameters(self) -> dict[str, str]: ... @property def error_message(self) -> str | None: ... @property def host(self) -> str: ... @property def needs_password(self) -> bool: ... @property def options(self) -> str: ... @property def password(self) -> str: ... @property def port(self) -> int: ... @property def protocol_version(self) -> int: ... @property def server_version(self) -> int: ... @property def socket(self) -> int: ... @property def ssl_attribute_names(self) -> list[str]: ... @property def ssl_in_use(self) -> bool: ... @property def status(self) -> int: ... @property def transaction_status(self) -> int: ... @property def used_password(self) -> bool: ... @property def user(self) -> str: ... def __init__(self, *args, **kwargs) -> None: ... def parameter_status(self, name: str) -> str | None: ... def ssl_attribute(self, name: str) -> str | None: ... class DataError(psycopg2.DatabaseError): ... class DatabaseError(psycopg2.Error): ... 
class Decimal: adapted: Any def __init__(self, *args, **kwargs) -> None: ... def getquoted(self, *args, **kwargs): ... def __conform__(self, *args, **kwargs): ... class Diagnostics: column_name: str | None constraint_name: str | None context: str | None datatype_name: str | None internal_position: str | None internal_query: str | None message_detail: str | None message_hint: str | None message_primary: str | None schema_name: str | None severity: str | None severity_nonlocalized: str | None source_file: str | None source_function: str | None source_line: str | None sqlstate: str | None statement_position: str | None table_name: str | None def __init__(self, __err: Error) -> None: ... class Error(Exception): cursor: _Cursor | None diag: Diagnostics pgcode: str | None pgerror: str | None def __init__(self, *args, **kwargs) -> None: ... def __reduce__(self): ... def __setstate__(self, state): ... class Float: adapted: Any def __init__(self, *args, **kwargs) -> None: ... def getquoted(self, *args, **kwargs): ... def __conform__(self, *args, **kwargs): ... class ISQLQuote: _wrapped: Any def __init__(self, *args, **kwargs) -> None: ... def getbinary(self, *args, **kwargs): ... def getbuffer(self, *args, **kwargs): ... def getquoted(self, *args, **kwargs) -> bytes: ... class Int: adapted: Any def __init__(self, *args, **kwargs) -> None: ... def getquoted(self, *args, **kwargs): ... def __conform__(self, *args, **kwargs): ... class IntegrityError(psycopg2.DatabaseError): ... class InterfaceError(psycopg2.Error): ... class InternalError(psycopg2.DatabaseError): ... class List: adapted: Any def __init__(self, *args, **kwargs) -> None: ... def getquoted(self, *args, **kwargs): ... def prepare(self, *args, **kwargs): ... def __conform__(self, *args, **kwargs): ... class NotSupportedError(psycopg2.DatabaseError): ... class Notify: channel: Any payload: Any pid: Any def __init__(self, *args, **kwargs) -> None: ... def __eq__(self, __other): ... def __ge__(self, __other): ... 
def __getitem__(self, __index): ... def __gt__(self, __other): ... def __hash__(self) -> int: ... def __le__(self, __other): ... def __len__(self) -> int: ... def __lt__(self, __other): ... def __ne__(self, __other): ... class OperationalError(psycopg2.DatabaseError): ... class ProgrammingError(psycopg2.DatabaseError): ... class QueryCanceledError(psycopg2.OperationalError): ... class QuotedString: adapted: Any buffer: Any encoding: Any def __init__(self, *args, **kwargs) -> None: ... def getquoted(self, *args, **kwargs): ... def prepare(self, *args, **kwargs): ... def __conform__(self, *args, **kwargs): ... class ReplicationConnection(psycopg2.extensions.connection): autocommit: Any isolation_level: Any replication_type: Any reset: Any set_isolation_level: Any set_session: Any def __init__(self, *args, **kwargs) -> None: ... class ReplicationCursor(cursor): feedback_timestamp: Any io_timestamp: Any wal_end: Any def __init__(self, *args, **kwargs) -> None: ... def consume_stream(self, consumer, keepalive_interval=...): ... def read_message(self, *args, **kwargs): ... def send_feedback(self, write_lsn=..., flush_lsn=..., apply_lsn=..., reply=..., force=...): ... def start_replication_expert(self, command, decode=..., status_interval=...): ... class ReplicationMessage: cursor: Any data_size: Any data_start: Any payload: Any send_time: Any wal_end: Any def __init__(self, *args, **kwargs) -> None: ... class TransactionRollbackError(psycopg2.OperationalError): ... class Warning(Exception): ... class Xid: bqual: Any database: Any format_id: Any gtrid: Any owner: Any prepared: Any def __init__(self, *args, **kwargs) -> None: ... def from_string(self, *args, **kwargs): ... def __getitem__(self, __index): ... def __len__(self) -> int: ... 
_T_cur = TypeVar("_T_cur", bound=cursor) class connection: DataError: Any DatabaseError: Any Error: Any IntegrityError: Any InterfaceError: Any InternalError: Any NotSupportedError: Any OperationalError: Any ProgrammingError: Any Warning: Any @property def async_(self) -> int: ... autocommit: bool @property def binary_types(self) -> Any: ... @property def closed(self) -> int: ... cursor_factory: Callable[..., _Cursor] @property def dsn(self) -> str: ... @property def encoding(self) -> str: ... @property def info(self) -> ConnectionInfo: ... @property def isolation_level(self) -> int | None: ... @isolation_level.setter def isolation_level(self, __value: str | bytes | int | None) -> None: ... notices: list[Any] notifies: list[Any] @property def pgconn_ptr(self) -> int | None: ... @property def protocol_version(self) -> int: ... @property def deferrable(self) -> bool | None: ... @deferrable.setter def deferrable(self, __value: Literal["default"] | bool | None) -> None: ... @property def readonly(self) -> bool | None: ... @readonly.setter def readonly(self, __value: Literal["default"] | bool | None) -> None: ... @property def server_version(self) -> int: ... @property def status(self) -> int: ... @property def string_types(self) -> Any: ... # Really it's dsn: str, async: int = ..., async_: int = ..., but # that would be a syntax error. def __init__(self, dsn: str, *, async_: int = ...) -> None: ... def cancel(self) -> None: ... def close(self) -> None: ... def commit(self) -> None: ... @overload def cursor(self, name: str | bytes | None = ..., *, withhold: bool = ..., scrollable: bool | None = ...) -> _Cursor: ... def fileno(self) -> int: ... def get_backend_pid(self) -> int: ... def get_dsn_parameters(self) -> dict[str, str]: ... def get_native_connection(self): ... def get_parameter_status(self, parameter: str) -> str | None: ... def get_transaction_status(self) -> int: ... def isexecuting(self) -> bool: ... 
def lobject( self, oid: int = ..., mode: str | None = ..., new_oid: int = ..., new_file: str | None = ..., lobject_factory: type[lobject] = ..., ) -> lobject: ... def poll(self) -> int: ... def reset(self) -> None: ... def rollback(self) -> None: ... def set_client_encoding(self, encoding: str) -> None: ... def set_isolation_level(self, level: int | None) -> None: ... def set_session( self, isolation_level: str | bytes | int | None = ..., readonly: bool | Literal["default", b"default"] | None = ..., deferrable: bool | Literal["default", b"default"] | None = ..., autocommit: bool = ..., ) -> None: ... def tpc_begin(self, xid: str | bytes | Xid) -> None: ... def tpc_commit(self, __xid: str | bytes | Xid = ...) -> None: ... def tpc_prepare(self) -> None: ... def tpc_recover(self) -> list[Xid]: ... def tpc_rollback(self, __xid: str | bytes | Xid = ...) -> None: ... def xid(self, format_id, gtrid, bqual) -> Xid: ... def __enter__(self) -> Self: ... def __exit__(self, __type: type[BaseException] | None, __name: BaseException | None, __tb: TracebackType | None) -> None: ... class lobject: closed: Any mode: Any oid: Any def __init__(self, *args, **kwargs) -> None: ... def close(self): ... def export(self, filename): ... def read(self, size=...): ... def seek(self, offset, whence=...): ... def tell(self): ... def truncate(self, len=...): ... def unlink(self): ... def write(self, str): ... def Date(year, month, day): ... def DateFromPy(*args, **kwargs): ... def DateFromTicks(ticks): ... def IntervalFromPy(*args, **kwargs): ... def Time(hour, minutes, seconds, tzinfo=...): ... def TimeFromPy(*args, **kwargs): ... def TimeFromTicks(ticks): ... def Timestamp(year, month, day, hour, minutes, seconds, tzinfo=...): ... def TimestampFromPy(*args, **kwargs): ... def TimestampFromTicks(ticks): ... def _connect(*args, **kwargs): ... def adapt(*args, **kwargs): ... def encrypt_password(*args, **kwargs): ... def get_wait_callback(*args, **kwargs): ... 
def libpq_version(*args, **kwargs): ... def new_array_type(oids, name, baseobj): ... def new_type(oids, name, castobj): ... def parse_dsn(dsn: str | bytes) -> dict[str, Any]: ... def quote_ident(value: Any, scope: connection | cursor | None) -> str: ... def register_type(*args, **kwargs): ... def set_wait_callback(_none): ... patroni-3.2.2/typings/psycopg2/_range.pyi000066400000000000000000000032311455170150700204170ustar00rootroot00000000000000from _typeshed import Incomplete from typing import Any class Range: def __init__( self, lower: Incomplete | None = None, upper: Incomplete | None = None, bounds: str = "[)", empty: bool = False ) -> None: ... @property def lower(self): ... @property def upper(self): ... @property def isempty(self): ... @property def lower_inf(self): ... @property def upper_inf(self): ... @property def lower_inc(self): ... @property def upper_inc(self): ... def __contains__(self, x): ... def __bool__(self) -> bool: ... def __eq__(self, other): ... def __ne__(self, other): ... def __hash__(self) -> int: ... def __lt__(self, other): ... def __le__(self, other): ... def __gt__(self, other): ... def __ge__(self, other): ... def register_range(pgrange, pyrange, conn_or_curs, globally: bool = False): ... class RangeAdapter: name: Any adapted: Any def __init__(self, adapted) -> None: ... def __conform__(self, proto): ... def prepare(self, conn) -> None: ... def getquoted(self): ... class RangeCaster: subtype_oid: Any typecaster: Any array_typecaster: Any def __init__(self, pgrange, pyrange, oid, subtype_oid, array_oid: Incomplete | None = None) -> None: ... def parse(self, s, cur: Incomplete | None = None): ... class NumericRange(Range): ... class DateRange(Range): ... class DateTimeRange(Range): ... class DateTimeTZRange(Range): ... class NumberRangeAdapter(RangeAdapter): def getquoted(self): ... 
int4range_caster: Any int8range_caster: Any numrange_caster: Any daterange_caster: Any tsrange_caster: Any tstzrange_caster: Any patroni-3.2.2/typings/psycopg2/errorcodes.pyi000066400000000000000000000221331455170150700213350ustar00rootroot00000000000000def lookup(code, _cache={}): ... CLASS_SUCCESSFUL_COMPLETION: str CLASS_WARNING: str CLASS_NO_DATA: str CLASS_SQL_STATEMENT_NOT_YET_COMPLETE: str CLASS_CONNECTION_EXCEPTION: str CLASS_TRIGGERED_ACTION_EXCEPTION: str CLASS_FEATURE_NOT_SUPPORTED: str CLASS_INVALID_TRANSACTION_INITIATION: str CLASS_LOCATOR_EXCEPTION: str CLASS_INVALID_GRANTOR: str CLASS_INVALID_ROLE_SPECIFICATION: str CLASS_DIAGNOSTICS_EXCEPTION: str CLASS_CASE_NOT_FOUND: str CLASS_CARDINALITY_VIOLATION: str CLASS_DATA_EXCEPTION: str CLASS_INTEGRITY_CONSTRAINT_VIOLATION: str CLASS_INVALID_CURSOR_STATE: str CLASS_INVALID_TRANSACTION_STATE: str CLASS_INVALID_SQL_STATEMENT_NAME: str CLASS_TRIGGERED_DATA_CHANGE_VIOLATION: str CLASS_INVALID_AUTHORIZATION_SPECIFICATION: str CLASS_DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: str CLASS_INVALID_TRANSACTION_TERMINATION: str CLASS_SQL_ROUTINE_EXCEPTION: str CLASS_INVALID_CURSOR_NAME: str CLASS_EXTERNAL_ROUTINE_EXCEPTION: str CLASS_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION: str CLASS_SAVEPOINT_EXCEPTION: str CLASS_INVALID_CATALOG_NAME: str CLASS_INVALID_SCHEMA_NAME: str CLASS_TRANSACTION_ROLLBACK: str CLASS_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION: str CLASS_WITH_CHECK_OPTION_VIOLATION: str CLASS_INSUFFICIENT_RESOURCES: str CLASS_PROGRAM_LIMIT_EXCEEDED: str CLASS_OBJECT_NOT_IN_PREREQUISITE_STATE: str CLASS_OPERATOR_INTERVENTION: str CLASS_SYSTEM_ERROR: str CLASS_SNAPSHOT_FAILURE: str CLASS_CONFIGURATION_FILE_ERROR: str CLASS_FOREIGN_DATA_WRAPPER_ERROR: str CLASS_PL_PGSQL_ERROR: str CLASS_INTERNAL_ERROR: str SUCCESSFUL_COMPLETION: str WARNING: str NULL_VALUE_ELIMINATED_IN_SET_FUNCTION: str STRING_DATA_RIGHT_TRUNCATION_: str PRIVILEGE_NOT_REVOKED: str PRIVILEGE_NOT_GRANTED: str IMPLICIT_ZERO_BIT_PADDING: str 
DYNAMIC_RESULT_SETS_RETURNED: str DEPRECATED_FEATURE: str NO_DATA: str NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED: str SQL_STATEMENT_NOT_YET_COMPLETE: str CONNECTION_EXCEPTION: str SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: str CONNECTION_DOES_NOT_EXIST: str SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION: str CONNECTION_FAILURE: str TRANSACTION_RESOLUTION_UNKNOWN: str PROTOCOL_VIOLATION: str TRIGGERED_ACTION_EXCEPTION: str FEATURE_NOT_SUPPORTED: str INVALID_TRANSACTION_INITIATION: str LOCATOR_EXCEPTION: str INVALID_LOCATOR_SPECIFICATION: str INVALID_GRANTOR: str INVALID_GRANT_OPERATION: str INVALID_ROLE_SPECIFICATION: str DIAGNOSTICS_EXCEPTION: str STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: str CASE_NOT_FOUND: str CARDINALITY_VIOLATION: str DATA_EXCEPTION: str STRING_DATA_RIGHT_TRUNCATION: str NULL_VALUE_NO_INDICATOR_PARAMETER: str NUMERIC_VALUE_OUT_OF_RANGE: str NULL_VALUE_NOT_ALLOWED_: str ERROR_IN_ASSIGNMENT: str INVALID_DATETIME_FORMAT: str DATETIME_FIELD_OVERFLOW: str INVALID_TIME_ZONE_DISPLACEMENT_VALUE: str ESCAPE_CHARACTER_CONFLICT: str INVALID_USE_OF_ESCAPE_CHARACTER: str INVALID_ESCAPE_OCTET: str ZERO_LENGTH_CHARACTER_STRING: str MOST_SPECIFIC_TYPE_MISMATCH: str SEQUENCE_GENERATOR_LIMIT_EXCEEDED: str NOT_AN_XML_DOCUMENT: str INVALID_XML_DOCUMENT: str INVALID_XML_CONTENT: str INVALID_XML_COMMENT: str INVALID_XML_PROCESSING_INSTRUCTION: str INVALID_INDICATOR_PARAMETER_VALUE: str SUBSTRING_ERROR: str DIVISION_BY_ZERO: str INVALID_PRECEDING_OR_FOLLOWING_SIZE: str INVALID_ARGUMENT_FOR_NTILE_FUNCTION: str INTERVAL_FIELD_OVERFLOW: str INVALID_ARGUMENT_FOR_NTH_VALUE_FUNCTION: str INVALID_CHARACTER_VALUE_FOR_CAST: str INVALID_ESCAPE_CHARACTER: str INVALID_REGULAR_EXPRESSION: str INVALID_ARGUMENT_FOR_LOGARITHM: str INVALID_ARGUMENT_FOR_POWER_FUNCTION: str INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION: str INVALID_ROW_COUNT_IN_LIMIT_CLAUSE: str INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE: str INVALID_LIMIT_VALUE: str CHARACTER_NOT_IN_REPERTOIRE: str 
INDICATOR_OVERFLOW: str INVALID_PARAMETER_VALUE: str UNTERMINATED_C_STRING: str INVALID_ESCAPE_SEQUENCE: str STRING_DATA_LENGTH_MISMATCH: str TRIM_ERROR: str ARRAY_SUBSCRIPT_ERROR: str INVALID_TABLESAMPLE_REPEAT: str INVALID_TABLESAMPLE_ARGUMENT: str DUPLICATE_JSON_OBJECT_KEY_VALUE: str INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION: str INVALID_JSON_TEXT: str INVALID_SQL_JSON_SUBSCRIPT: str MORE_THAN_ONE_SQL_JSON_ITEM: str NO_SQL_JSON_ITEM: str NON_NUMERIC_SQL_JSON_ITEM: str NON_UNIQUE_KEYS_IN_A_JSON_OBJECT: str SINGLETON_SQL_JSON_ITEM_REQUIRED: str SQL_JSON_ARRAY_NOT_FOUND: str SQL_JSON_MEMBER_NOT_FOUND: str SQL_JSON_NUMBER_NOT_FOUND: str SQL_JSON_OBJECT_NOT_FOUND: str TOO_MANY_JSON_ARRAY_ELEMENTS: str TOO_MANY_JSON_OBJECT_MEMBERS: str SQL_JSON_SCALAR_REQUIRED: str FLOATING_POINT_EXCEPTION: str INVALID_TEXT_REPRESENTATION: str INVALID_BINARY_REPRESENTATION: str BAD_COPY_FILE_FORMAT: str UNTRANSLATABLE_CHARACTER: str NONSTANDARD_USE_OF_ESCAPE_CHARACTER: str INTEGRITY_CONSTRAINT_VIOLATION: str RESTRICT_VIOLATION: str NOT_NULL_VIOLATION: str FOREIGN_KEY_VIOLATION: str UNIQUE_VIOLATION: str CHECK_VIOLATION: str EXCLUSION_VIOLATION: str INVALID_CURSOR_STATE: str INVALID_TRANSACTION_STATE: str ACTIVE_SQL_TRANSACTION: str BRANCH_TRANSACTION_ALREADY_ACTIVE: str INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: str INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: str NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: str READ_ONLY_SQL_TRANSACTION: str SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED: str HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL: str NO_ACTIVE_SQL_TRANSACTION: str IN_FAILED_SQL_TRANSACTION: str IDLE_IN_TRANSACTION_SESSION_TIMEOUT: str INVALID_SQL_STATEMENT_NAME: str TRIGGERED_DATA_CHANGE_VIOLATION: str INVALID_AUTHORIZATION_SPECIFICATION: str INVALID_PASSWORD: str DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: str DEPENDENT_OBJECTS_STILL_EXIST: str INVALID_TRANSACTION_TERMINATION: str SQL_ROUTINE_EXCEPTION: str MODIFYING_SQL_DATA_NOT_PERMITTED_: str 
PROHIBITED_SQL_STATEMENT_ATTEMPTED_: str READING_SQL_DATA_NOT_PERMITTED_: str FUNCTION_EXECUTED_NO_RETURN_STATEMENT: str INVALID_CURSOR_NAME: str EXTERNAL_ROUTINE_EXCEPTION: str CONTAINING_SQL_NOT_PERMITTED: str MODIFYING_SQL_DATA_NOT_PERMITTED: str PROHIBITED_SQL_STATEMENT_ATTEMPTED: str READING_SQL_DATA_NOT_PERMITTED: str EXTERNAL_ROUTINE_INVOCATION_EXCEPTION: str INVALID_SQLSTATE_RETURNED: str NULL_VALUE_NOT_ALLOWED: str TRIGGER_PROTOCOL_VIOLATED: str SRF_PROTOCOL_VIOLATED: str EVENT_TRIGGER_PROTOCOL_VIOLATED: str SAVEPOINT_EXCEPTION: str INVALID_SAVEPOINT_SPECIFICATION: str INVALID_CATALOG_NAME: str INVALID_SCHEMA_NAME: str TRANSACTION_ROLLBACK: str SERIALIZATION_FAILURE: str TRANSACTION_INTEGRITY_CONSTRAINT_VIOLATION: str STATEMENT_COMPLETION_UNKNOWN: str DEADLOCK_DETECTED: str SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION: str INSUFFICIENT_PRIVILEGE: str SYNTAX_ERROR: str INVALID_NAME: str INVALID_COLUMN_DEFINITION: str NAME_TOO_LONG: str DUPLICATE_COLUMN: str AMBIGUOUS_COLUMN: str UNDEFINED_COLUMN: str UNDEFINED_OBJECT: str DUPLICATE_OBJECT: str DUPLICATE_ALIAS: str DUPLICATE_FUNCTION: str AMBIGUOUS_FUNCTION: str GROUPING_ERROR: str DATATYPE_MISMATCH: str WRONG_OBJECT_TYPE: str INVALID_FOREIGN_KEY: str CANNOT_COERCE: str UNDEFINED_FUNCTION: str GENERATED_ALWAYS: str RESERVED_NAME: str UNDEFINED_TABLE: str UNDEFINED_PARAMETER: str DUPLICATE_CURSOR: str DUPLICATE_DATABASE: str DUPLICATE_PREPARED_STATEMENT: str DUPLICATE_SCHEMA: str DUPLICATE_TABLE: str AMBIGUOUS_PARAMETER: str AMBIGUOUS_ALIAS: str INVALID_COLUMN_REFERENCE: str INVALID_CURSOR_DEFINITION: str INVALID_DATABASE_DEFINITION: str INVALID_FUNCTION_DEFINITION: str INVALID_PREPARED_STATEMENT_DEFINITION: str INVALID_SCHEMA_DEFINITION: str INVALID_TABLE_DEFINITION: str INVALID_OBJECT_DEFINITION: str INDETERMINATE_DATATYPE: str INVALID_RECURSION: str WINDOWING_ERROR: str COLLATION_MISMATCH: str INDETERMINATE_COLLATION: str WITH_CHECK_OPTION_VIOLATION: str INSUFFICIENT_RESOURCES: str DISK_FULL: str OUT_OF_MEMORY: 
str TOO_MANY_CONNECTIONS: str CONFIGURATION_LIMIT_EXCEEDED: str PROGRAM_LIMIT_EXCEEDED: str STATEMENT_TOO_COMPLEX: str TOO_MANY_COLUMNS: str TOO_MANY_ARGUMENTS: str OBJECT_NOT_IN_PREREQUISITE_STATE: str OBJECT_IN_USE: str CANT_CHANGE_RUNTIME_PARAM: str LOCK_NOT_AVAILABLE: str UNSAFE_NEW_ENUM_VALUE_USAGE: str OPERATOR_INTERVENTION: str QUERY_CANCELED: str ADMIN_SHUTDOWN: str CRASH_SHUTDOWN: str CANNOT_CONNECT_NOW: str DATABASE_DROPPED: str SYSTEM_ERROR: str IO_ERROR: str UNDEFINED_FILE: str DUPLICATE_FILE: str SNAPSHOT_TOO_OLD: str CONFIG_FILE_ERROR: str LOCK_FILE_EXISTS: str FDW_ERROR: str FDW_OUT_OF_MEMORY: str FDW_DYNAMIC_PARAMETER_VALUE_NEEDED: str FDW_INVALID_DATA_TYPE: str FDW_COLUMN_NAME_NOT_FOUND: str FDW_INVALID_DATA_TYPE_DESCRIPTORS: str FDW_INVALID_COLUMN_NAME: str FDW_INVALID_COLUMN_NUMBER: str FDW_INVALID_USE_OF_NULL_POINTER: str FDW_INVALID_STRING_FORMAT: str FDW_INVALID_HANDLE: str FDW_INVALID_OPTION_INDEX: str FDW_INVALID_OPTION_NAME: str FDW_OPTION_NAME_NOT_FOUND: str FDW_REPLY_HANDLE: str FDW_UNABLE_TO_CREATE_EXECUTION: str FDW_UNABLE_TO_CREATE_REPLY: str FDW_UNABLE_TO_ESTABLISH_CONNECTION: str FDW_NO_SCHEMAS: str FDW_SCHEMA_NOT_FOUND: str FDW_TABLE_NOT_FOUND: str FDW_FUNCTION_SEQUENCE_ERROR: str FDW_TOO_MANY_HANDLES: str FDW_INCONSISTENT_DESCRIPTOR_INFORMATION: str FDW_INVALID_ATTRIBUTE_VALUE: str FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH: str FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER: str PLPGSQL_ERROR: str RAISE_EXCEPTION: str NO_DATA_FOUND: str TOO_MANY_ROWS: str ASSERT_FAILURE: str INTERNAL_ERROR: str DATA_CORRUPTED: str INDEX_CORRUPTED: str patroni-3.2.2/typings/psycopg2/errors.pyi000066400000000000000000000307321455170150700205060ustar00rootroot00000000000000from psycopg2._psycopg import Error as Error, Warning as Warning class DatabaseError(Error): ... class InterfaceError(Error): ... class DataError(DatabaseError): ... class DiagnosticsException(DatabaseError): ... class IntegrityError(DatabaseError): ... 
class InternalError(DatabaseError): ... class InvalidGrantOperation(DatabaseError): ... class InvalidGrantor(DatabaseError): ... class InvalidLocatorSpecification(DatabaseError): ... class InvalidRoleSpecification(DatabaseError): ... class InvalidTransactionInitiation(DatabaseError): ... class LocatorException(DatabaseError): ... class NoAdditionalDynamicResultSetsReturned(DatabaseError): ... class NoData(DatabaseError): ... class NotSupportedError(DatabaseError): ... class OperationalError(DatabaseError): ... class ProgrammingError(DatabaseError): ... class SnapshotTooOld(DatabaseError): ... class SqlStatementNotYetComplete(DatabaseError): ... class StackedDiagnosticsAccessedWithoutActiveHandler(DatabaseError): ... class TriggeredActionException(DatabaseError): ... class ActiveSqlTransaction(InternalError): ... class AdminShutdown(OperationalError): ... class AmbiguousAlias(ProgrammingError): ... class AmbiguousColumn(ProgrammingError): ... class AmbiguousFunction(ProgrammingError): ... class AmbiguousParameter(ProgrammingError): ... class ArraySubscriptError(DataError): ... class AssertFailure(InternalError): ... class BadCopyFileFormat(DataError): ... class BranchTransactionAlreadyActive(InternalError): ... class CannotCoerce(ProgrammingError): ... class CannotConnectNow(OperationalError): ... class CantChangeRuntimeParam(OperationalError): ... class CardinalityViolation(ProgrammingError): ... class CaseNotFound(ProgrammingError): ... class CharacterNotInRepertoire(DataError): ... class CheckViolation(IntegrityError): ... class CollationMismatch(ProgrammingError): ... class ConfigFileError(InternalError): ... class ConfigurationLimitExceeded(OperationalError): ... class ConnectionDoesNotExist(OperationalError): ... class ConnectionException(OperationalError): ... class ConnectionFailure(OperationalError): ... class ContainingSqlNotPermitted(InternalError): ... class CrashShutdown(OperationalError): ... class DataCorrupted(InternalError): ... 
class DataException(DataError): ... class DatabaseDropped(OperationalError): ... class DatatypeMismatch(ProgrammingError): ... class DatetimeFieldOverflow(DataError): ... class DependentObjectsStillExist(InternalError): ... class DependentPrivilegeDescriptorsStillExist(InternalError): ... class DiskFull(OperationalError): ... class DivisionByZero(DataError): ... class DuplicateAlias(ProgrammingError): ... class DuplicateColumn(ProgrammingError): ... class DuplicateCursor(ProgrammingError): ... class DuplicateDatabase(ProgrammingError): ... class DuplicateFile(OperationalError): ... class DuplicateFunction(ProgrammingError): ... class DuplicateJsonObjectKeyValue(DataError): ... class DuplicateObject(ProgrammingError): ... class DuplicatePreparedStatement(ProgrammingError): ... class DuplicateSchema(ProgrammingError): ... class DuplicateTable(ProgrammingError): ... class ErrorInAssignment(DataError): ... class EscapeCharacterConflict(DataError): ... class EventTriggerProtocolViolated(InternalError): ... class ExclusionViolation(IntegrityError): ... class ExternalRoutineException(InternalError): ... class ExternalRoutineInvocationException(InternalError): ... class FdwColumnNameNotFound(OperationalError): ... class FdwDynamicParameterValueNeeded(OperationalError): ... class FdwError(OperationalError): ... class FdwFunctionSequenceError(OperationalError): ... class FdwInconsistentDescriptorInformation(OperationalError): ... class FdwInvalidAttributeValue(OperationalError): ... class FdwInvalidColumnName(OperationalError): ... class FdwInvalidColumnNumber(OperationalError): ... class FdwInvalidDataType(OperationalError): ... class FdwInvalidDataTypeDescriptors(OperationalError): ... class FdwInvalidDescriptorFieldIdentifier(OperationalError): ... class FdwInvalidHandle(OperationalError): ... class FdwInvalidOptionIndex(OperationalError): ... class FdwInvalidOptionName(OperationalError): ... class FdwInvalidStringFormat(OperationalError): ... 
class FdwInvalidStringLengthOrBufferLength(OperationalError): ... class FdwInvalidUseOfNullPointer(OperationalError): ... class FdwNoSchemas(OperationalError): ... class FdwOptionNameNotFound(OperationalError): ... class FdwOutOfMemory(OperationalError): ... class FdwReplyHandle(OperationalError): ... class FdwSchemaNotFound(OperationalError): ... class FdwTableNotFound(OperationalError): ... class FdwTooManyHandles(OperationalError): ... class FdwUnableToCreateExecution(OperationalError): ... class FdwUnableToCreateReply(OperationalError): ... class FdwUnableToEstablishConnection(OperationalError): ... class FeatureNotSupported(NotSupportedError): ... class FloatingPointException(DataError): ... class ForeignKeyViolation(IntegrityError): ... class FunctionExecutedNoReturnStatement(InternalError): ... class GeneratedAlways(ProgrammingError): ... class GroupingError(ProgrammingError): ... class HeldCursorRequiresSameIsolationLevel(InternalError): ... class IdleInTransactionSessionTimeout(InternalError): ... class InFailedSqlTransaction(InternalError): ... class InappropriateAccessModeForBranchTransaction(InternalError): ... class InappropriateIsolationLevelForBranchTransaction(InternalError): ... class IndeterminateCollation(ProgrammingError): ... class IndeterminateDatatype(ProgrammingError): ... class IndexCorrupted(InternalError): ... class IndicatorOverflow(DataError): ... class InsufficientPrivilege(ProgrammingError): ... class InsufficientResources(OperationalError): ... class IntegrityConstraintViolation(IntegrityError): ... class InternalError_(InternalError): ... class IntervalFieldOverflow(DataError): ... class InvalidArgumentForLogarithm(DataError): ... class InvalidArgumentForNthValueFunction(DataError): ... class InvalidArgumentForNtileFunction(DataError): ... class InvalidArgumentForPowerFunction(DataError): ... class InvalidArgumentForSqlJsonDatetimeFunction(DataError): ... class InvalidArgumentForWidthBucketFunction(DataError): ... 
class InvalidAuthorizationSpecification(OperationalError): ... class InvalidBinaryRepresentation(DataError): ... class InvalidCatalogName(ProgrammingError): ... class InvalidCharacterValueForCast(DataError): ... class InvalidColumnDefinition(ProgrammingError): ... class InvalidColumnReference(ProgrammingError): ... class InvalidCursorDefinition(ProgrammingError): ... class InvalidCursorName(OperationalError): ... class InvalidCursorState(InternalError): ... class InvalidDatabaseDefinition(ProgrammingError): ... class InvalidDatetimeFormat(DataError): ... class InvalidEscapeCharacter(DataError): ... class InvalidEscapeOctet(DataError): ... class InvalidEscapeSequence(DataError): ... class InvalidForeignKey(ProgrammingError): ... class InvalidFunctionDefinition(ProgrammingError): ... class InvalidIndicatorParameterValue(DataError): ... class InvalidJsonText(DataError): ... class InvalidName(ProgrammingError): ... class InvalidObjectDefinition(ProgrammingError): ... class InvalidParameterValue(DataError): ... class InvalidPassword(OperationalError): ... class InvalidPrecedingOrFollowingSize(DataError): ... class InvalidPreparedStatementDefinition(ProgrammingError): ... class InvalidRecursion(ProgrammingError): ... class InvalidRegularExpression(DataError): ... class InvalidRowCountInLimitClause(DataError): ... class InvalidRowCountInResultOffsetClause(DataError): ... class InvalidSavepointSpecification(InternalError): ... class InvalidSchemaDefinition(ProgrammingError): ... class InvalidSchemaName(ProgrammingError): ... class InvalidSqlJsonSubscript(DataError): ... class InvalidSqlStatementName(OperationalError): ... class InvalidSqlstateReturned(InternalError): ... class InvalidTableDefinition(ProgrammingError): ... class InvalidTablesampleArgument(DataError): ... class InvalidTablesampleRepeat(DataError): ... class InvalidTextRepresentation(DataError): ... class InvalidTimeZoneDisplacementValue(DataError): ... class InvalidTransactionState(InternalError): ... 
class InvalidTransactionTermination(InternalError): ... class InvalidUseOfEscapeCharacter(DataError): ... class InvalidXmlComment(DataError): ... class InvalidXmlContent(DataError): ... class InvalidXmlDocument(DataError): ... class InvalidXmlProcessingInstruction(DataError): ... class IoError(OperationalError): ... class LockFileExists(InternalError): ... class LockNotAvailable(OperationalError): ... class ModifyingSqlDataNotPermitted(InternalError): ... class ModifyingSqlDataNotPermittedExt(InternalError): ... class MoreThanOneSqlJsonItem(DataError): ... class MostSpecificTypeMismatch(DataError): ... class NameTooLong(ProgrammingError): ... class NoActiveSqlTransaction(InternalError): ... class NoActiveSqlTransactionForBranchTransaction(InternalError): ... class NoDataFound(InternalError): ... class NoSqlJsonItem(DataError): ... class NonNumericSqlJsonItem(DataError): ... class NonUniqueKeysInAJsonObject(DataError): ... class NonstandardUseOfEscapeCharacter(DataError): ... class NotAnXmlDocument(DataError): ... class NotNullViolation(IntegrityError): ... class NullValueNoIndicatorParameter(DataError): ... class NullValueNotAllowed(DataError): ... class NullValueNotAllowedExt(InternalError): ... class NumericValueOutOfRange(DataError): ... class ObjectInUse(OperationalError): ... class ObjectNotInPrerequisiteState(OperationalError): ... class OperatorIntervention(OperationalError): ... class OutOfMemory(OperationalError): ... class PlpgsqlError(InternalError): ... class ProgramLimitExceeded(OperationalError): ... class ProhibitedSqlStatementAttempted(InternalError): ... class ProhibitedSqlStatementAttemptedExt(InternalError): ... class ProtocolViolation(OperationalError): ... class QueryCanceledError(OperationalError): ... class RaiseException(InternalError): ... class ReadOnlySqlTransaction(InternalError): ... class ReadingSqlDataNotPermitted(InternalError): ... class ReadingSqlDataNotPermittedExt(InternalError): ... class ReservedName(ProgrammingError): ... 
class RestrictViolation(IntegrityError): ... class SavepointException(InternalError): ... class SchemaAndDataStatementMixingNotSupported(InternalError): ... class SequenceGeneratorLimitExceeded(DataError): ... class SingletonSqlJsonItemRequired(DataError): ... class SqlJsonArrayNotFound(DataError): ... class SqlJsonMemberNotFound(DataError): ... class SqlJsonNumberNotFound(DataError): ... class SqlJsonObjectNotFound(DataError): ... class SqlJsonScalarRequired(DataError): ... class SqlRoutineException(InternalError): ... class SqlclientUnableToEstablishSqlconnection(OperationalError): ... class SqlserverRejectedEstablishmentOfSqlconnection(OperationalError): ... class SrfProtocolViolated(InternalError): ... class StatementTooComplex(OperationalError): ... class StringDataLengthMismatch(DataError): ... class StringDataRightTruncation(DataError): ... class SubstringError(DataError): ... class SyntaxError(ProgrammingError): ... class SyntaxErrorOrAccessRuleViolation(ProgrammingError): ... class SystemError(OperationalError): ... class TooManyArguments(OperationalError): ... class TooManyColumns(OperationalError): ... class TooManyConnections(OperationalError): ... class TooManyJsonArrayElements(DataError): ... class TooManyJsonObjectMembers(DataError): ... class TooManyRows(InternalError): ... class TransactionResolutionUnknown(OperationalError): ... class TransactionRollbackError(OperationalError): ... class TriggerProtocolViolated(InternalError): ... class TriggeredDataChangeViolation(OperationalError): ... class TrimError(DataError): ... class UndefinedColumn(ProgrammingError): ... class UndefinedFile(OperationalError): ... class UndefinedFunction(ProgrammingError): ... class UndefinedObject(ProgrammingError): ... class UndefinedParameter(ProgrammingError): ... class UndefinedTable(ProgrammingError): ... class UniqueViolation(IntegrityError): ... class UnsafeNewEnumValueUsage(OperationalError): ... class UnterminatedCString(DataError): ... 
class UntranslatableCharacter(DataError): ... class WindowingError(ProgrammingError): ... class WithCheckOptionViolation(ProgrammingError): ... class WrongObjectType(ProgrammingError): ... class ZeroLengthCharacterString(DataError): ... class DeadlockDetected(TransactionRollbackError): ... class QueryCanceled(QueryCanceledError): ... class SerializationFailure(TransactionRollbackError): ... class StatementCompletionUnknown(TransactionRollbackError): ... class TransactionIntegrityConstraintViolation(TransactionRollbackError): ... class TransactionRollback(TransactionRollbackError): ... def lookup(code): ... patroni-3.2.2/typings/psycopg2/extensions.pyi000066400000000000000000000061631455170150700213720ustar00rootroot00000000000000from _typeshed import Incomplete from typing import Any from psycopg2._psycopg import ( BINARYARRAY as BINARYARRAY, BOOLEAN as BOOLEAN, BOOLEANARRAY as BOOLEANARRAY, BYTES as BYTES, BYTESARRAY as BYTESARRAY, DATE as DATE, DATEARRAY as DATEARRAY, DATETIMEARRAY as DATETIMEARRAY, DECIMAL as DECIMAL, DECIMALARRAY as DECIMALARRAY, FLOAT as FLOAT, FLOATARRAY as FLOATARRAY, INTEGER as INTEGER, INTEGERARRAY as INTEGERARRAY, INTERVAL as INTERVAL, INTERVALARRAY as INTERVALARRAY, LONGINTEGER as LONGINTEGER, LONGINTEGERARRAY as LONGINTEGERARRAY, PYDATE as PYDATE, PYDATEARRAY as PYDATEARRAY, PYDATETIME as PYDATETIME, PYDATETIMEARRAY as PYDATETIMEARRAY, PYDATETIMETZ as PYDATETIMETZ, PYDATETIMETZARRAY as PYDATETIMETZARRAY, PYINTERVAL as PYINTERVAL, PYINTERVALARRAY as PYINTERVALARRAY, PYTIME as PYTIME, PYTIMEARRAY as PYTIMEARRAY, ROWIDARRAY as ROWIDARRAY, STRINGARRAY as STRINGARRAY, TIME as TIME, TIMEARRAY as TIMEARRAY, UNICODE as UNICODE, UNICODEARRAY as UNICODEARRAY, AsIs as AsIs, Binary as Binary, Boolean as Boolean, Column as Column, ConnectionInfo as ConnectionInfo, DateFromPy as DateFromPy, Diagnostics as Diagnostics, Float as Float, Int as Int, IntervalFromPy as IntervalFromPy, ISQLQuote as ISQLQuote, Notify as Notify, QueryCanceledError as 
QueryCanceledError, QuotedString as QuotedString, TimeFromPy as TimeFromPy, TimestampFromPy as TimestampFromPy, TransactionRollbackError as TransactionRollbackError, Xid as Xid, adapt as adapt, adapters as adapters, binary_types as binary_types, connection as connection, cursor as cursor, encodings as encodings, encrypt_password as encrypt_password, get_wait_callback as get_wait_callback, libpq_version as libpq_version, lobject as lobject, new_array_type as new_array_type, new_type as new_type, parse_dsn as parse_dsn, quote_ident as quote_ident, register_type as register_type, set_wait_callback as set_wait_callback, string_types as string_types, ) ISOLATION_LEVEL_AUTOCOMMIT: int ISOLATION_LEVEL_READ_UNCOMMITTED: int ISOLATION_LEVEL_READ_COMMITTED: int ISOLATION_LEVEL_REPEATABLE_READ: int ISOLATION_LEVEL_SERIALIZABLE: int ISOLATION_LEVEL_DEFAULT: Any STATUS_SETUP: int STATUS_READY: int STATUS_BEGIN: int STATUS_SYNC: int STATUS_ASYNC: int STATUS_PREPARED: int STATUS_IN_TRANSACTION: int POLL_OK: int POLL_READ: int POLL_WRITE: int POLL_ERROR: int TRANSACTION_STATUS_IDLE: int TRANSACTION_STATUS_ACTIVE: int TRANSACTION_STATUS_INTRANS: int TRANSACTION_STATUS_INERROR: int TRANSACTION_STATUS_UNKNOWN: int def register_adapter(typ, callable) -> None: ... class SQL_IN: def __init__(self, seq) -> None: ... def prepare(self, conn) -> None: ... def getquoted(self): ... class NoneAdapter: def __init__(self, obj) -> None: ... def getquoted(self, _null: bytes = b"NULL"): ... def make_dsn(dsn: Incomplete | None = None, **kwargs): ... JSON: Any JSONARRAY: Any JSONB: Any JSONBARRAY: Any def adapt(obj: Any) -> ISQLQuote: ... 
patroni-3.2.2/typings/psycopg2/extras.pyi000066400000000000000000000205701455170150700204770ustar00rootroot00000000000000from _typeshed import Incomplete from collections import OrderedDict from collections.abc import Callable from typing import Any, NamedTuple, TypeVar, overload from psycopg2._ipaddress import register_ipaddress as register_ipaddress from psycopg2._json import ( Json as Json, register_default_json as register_default_json, register_default_jsonb as register_default_jsonb, register_json as register_json, ) from psycopg2._psycopg import ( REPLICATION_LOGICAL as REPLICATION_LOGICAL, REPLICATION_PHYSICAL as REPLICATION_PHYSICAL, ReplicationConnection as _replicationConnection, ReplicationCursor as _replicationCursor, ReplicationMessage as ReplicationMessage, ) from psycopg2._range import ( DateRange as DateRange, DateTimeRange as DateTimeRange, DateTimeTZRange as DateTimeTZRange, NumericRange as NumericRange, Range as Range, RangeAdapter as RangeAdapter, RangeCaster as RangeCaster, register_range as register_range, ) from .extensions import connection as _connection, cursor as _cursor, quote_ident as quote_ident _T_cur = TypeVar("_T_cur", bound=_cursor) class DictCursorBase(_cursor): def __init__(self, *args, **kwargs) -> None: ... class DictConnection(_connection): @overload def cursor(self, name: str | bytes | None = ..., *, withhold: bool = ..., scrollable: bool | None = ...) -> DictCursor: ... @overload def cursor( self, name: str | bytes | None = ..., *, cursor_factory: Callable[..., _T_cur], withhold: bool = ..., scrollable: bool | None = ..., ) -> _T_cur: ... @overload def cursor( self, name: str | bytes | None, cursor_factory: Callable[..., _T_cur], withhold: bool = ..., scrollable: bool | None = ... ) -> _T_cur: ... class DictCursor(DictCursorBase): def __init__(self, *args, **kwargs) -> None: ... index: Any def execute(self, query, vars: Incomplete | None = None): ... def callproc(self, procname, vars: Incomplete | None = None): ... 
def fetchone(self) -> DictRow | None: ... # type: ignore[override] def fetchmany(self, size: int | None = None) -> list[DictRow]: ... # type: ignore[override] def fetchall(self) -> list[DictRow]: ... # type: ignore[override] def __next__(self) -> DictRow: ... # type: ignore[override] class DictRow(list[Any]): def __init__(self, cursor) -> None: ... def __getitem__(self, x): ... def __setitem__(self, x, v) -> None: ... def items(self): ... def keys(self): ... def values(self): ... def get(self, x, default: Incomplete | None = None): ... def copy(self): ... def __contains__(self, x): ... def __reduce__(self): ... class RealDictConnection(_connection): @overload def cursor( self, name: str | bytes | None = ..., *, withhold: bool = ..., scrollable: bool | None = ... ) -> RealDictCursor: ... @overload def cursor( self, name: str | bytes | None = ..., *, cursor_factory: Callable[..., _T_cur], withhold: bool = ..., scrollable: bool | None = ..., ) -> _T_cur: ... @overload def cursor( self, name: str | bytes | None, cursor_factory: Callable[..., _T_cur], withhold: bool = ..., scrollable: bool | None = ... ) -> _T_cur: ... class RealDictCursor(DictCursorBase): def __init__(self, *args, **kwargs) -> None: ... column_mapping: Any def execute(self, query, vars: Incomplete | None = None): ... def callproc(self, procname, vars: Incomplete | None = None): ... def fetchone(self) -> RealDictRow | None: ... # type: ignore[override] def fetchmany(self, size: int | None = None) -> list[RealDictRow]: ... # type: ignore[override] def fetchall(self) -> list[RealDictRow]: ... # type: ignore[override] def __next__(self) -> RealDictRow: ... # type: ignore[override] class RealDictRow(OrderedDict[Any, Any]): def __init__(self, *args, **kwargs) -> None: ... def __setitem__(self, key, value) -> None: ... class NamedTupleConnection(_connection): @overload def cursor( self, name: str | bytes | None = ..., *, withhold: bool = ..., scrollable: bool | None = ... ) -> NamedTupleCursor: ... 
@overload def cursor( self, name: str | bytes | None = ..., *, cursor_factory: Callable[..., _T_cur], withhold: bool = ..., scrollable: bool | None = ..., ) -> _T_cur: ... @overload def cursor( self, name: str | bytes | None, cursor_factory: Callable[..., _T_cur], withhold: bool = ..., scrollable: bool | None = ... ) -> _T_cur: ... class NamedTupleCursor(_cursor): Record: Any MAX_CACHE: int def execute(self, query, vars: Incomplete | None = None): ... def executemany(self, query, vars): ... def callproc(self, procname, vars: Incomplete | None = None): ... def fetchone(self) -> NamedTuple | None: ... def fetchmany(self, size: int | None = None) -> list[NamedTuple]: ... # type: ignore[override] def fetchall(self) -> list[NamedTuple]: ... # type: ignore[override] def __next__(self) -> NamedTuple: ... class LoggingConnection(_connection): log: Any def initialize(self, logobj) -> None: ... def filter(self, msg, curs): ... def cursor(self, *args, **kwargs): ... class LoggingCursor(_cursor): def execute(self, query, vars: Incomplete | None = None): ... def callproc(self, procname, vars: Incomplete | None = None): ... class MinTimeLoggingConnection(LoggingConnection): def initialize(self, logobj, mintime: int = 0) -> None: ... def filter(self, msg, curs): ... def cursor(self, *args, **kwargs): ... class MinTimeLoggingCursor(LoggingCursor): timestamp: Any def execute(self, query, vars: Incomplete | None = None): ... def callproc(self, procname, vars: Incomplete | None = None): ... class LogicalReplicationConnection(_replicationConnection): def __init__(self, *args, **kwargs) -> None: ... class PhysicalReplicationConnection(_replicationConnection): def __init__(self, *args, **kwargs) -> None: ... class StopReplication(Exception): ... class ReplicationCursor(_replicationCursor): def create_replication_slot( self, slot_name, slot_type: Incomplete | None = None, output_plugin: Incomplete | None = None ) -> None: ... def drop_replication_slot(self, slot_name) -> None: ... 
def start_replication( self, slot_name: Incomplete | None = None, slot_type: Incomplete | None = None, start_lsn: int = 0, timeline: int = 0, options: Incomplete | None = None, decode: bool = False, status_interval: int = 10, ) -> None: ... def fileno(self): ... class UUID_adapter: def __init__(self, uuid) -> None: ... def __conform__(self, proto): ... def getquoted(self): ... def register_uuid(oids: Incomplete | None = None, conn_or_curs: Incomplete | None = None): ... class Inet: addr: Any def __init__(self, addr) -> None: ... def prepare(self, conn) -> None: ... def getquoted(self): ... def __conform__(self, proto): ... def register_inet(oid: Incomplete | None = None, conn_or_curs: Incomplete | None = None): ... def wait_select(conn) -> None: ... class HstoreAdapter: wrapped: Any def __init__(self, wrapped) -> None: ... conn: Any getquoted: Any def prepare(self, conn) -> None: ... @classmethod def parse(cls, s, cur, _bsdec=...): ... @classmethod def parse_unicode(cls, s, cur): ... @classmethod def get_oids(cls, conn_or_curs): ... def register_hstore( conn_or_curs, globally: bool = False, unicode: bool = False, oid: Incomplete | None = None, array_oid: Incomplete | None = None, ) -> None: ... class CompositeCaster: name: Any schema: Any oid: Any array_oid: Any attnames: Any atttypes: Any typecaster: Any array_typecaster: Any def __init__(self, name, oid, attrs, array_oid: Incomplete | None = None, schema: Incomplete | None = None) -> None: ... def parse(self, s, curs): ... def make(self, values): ... @classmethod def tokenize(cls, s): ... def register_composite(name, conn_or_curs, globally: bool = False, factory: Incomplete | None = None): ... def execute_batch(cur, sql, argslist, page_size: int = 100) -> None: ... def execute_values(cur, sql, argslist, template: Incomplete | None = None, page_size: int = 100, fetch: bool = False): ... 
patroni-3.2.2/typings/psycopg2/pool.pyi000066400000000000000000000017251455170150700201430ustar00rootroot00000000000000from _typeshed import Incomplete from typing import Any import psycopg2 class PoolError(psycopg2.Error): ... class AbstractConnectionPool: minconn: Any maxconn: Any closed: bool def __init__(self, minconn, maxconn, *args, **kwargs) -> None: ... # getconn, putconn and closeall are officially documented as methods of the # abstract base class, but in reality, they only exist on the children classes def getconn(self, key: Incomplete | None = ...): ... def putconn(self, conn: Any, key: Incomplete | None = ..., close: bool = ...) -> None: ... def closeall(self) -> None: ... class SimpleConnectionPool(AbstractConnectionPool): ... class ThreadedConnectionPool(AbstractConnectionPool): # This subclass has a default value for conn which doesn't exist # in the SimpleConnectionPool class, nor in the documentation def putconn(self, conn: Incomplete | None = None, key: Incomplete | None = None, close: bool = False) -> None: ... patroni-3.2.2/typings/psycopg2/sql.pyi000066400000000000000000000027421455170150700177710ustar00rootroot00000000000000from _typeshed import Incomplete from collections.abc import Iterator from typing import Any class Composable: def __init__(self, wrapped) -> None: ... def as_string(self, context) -> str: ... def __add__(self, other) -> Composed: ... def __mul__(self, n) -> Composed: ... def __eq__(self, other) -> bool: ... def __ne__(self, other) -> bool: ... class Composed(Composable): def __init__(self, seq) -> None: ... @property def seq(self) -> list[Composable]: ... def as_string(self, context) -> str: ... def __iter__(self) -> Iterator[Composable]: ... def __add__(self, other) -> Composed: ... def join(self, joiner) -> Composed: ... class SQL(Composable): def __init__(self, string) -> None: ... @property def string(self) -> str: ... def as_string(self, context) -> str: ... def format(self, *args, **kwargs) -> Composed: ... 
def join(self, seq) -> Composed: ... class Identifier(Composable): def __init__(self, *strings) -> None: ... @property def strings(self) -> tuple[str, ...]: ... @property def string(self) -> str: ... def as_string(self, context) -> str: ... class Literal(Composable): @property def wrapped(self): ... def as_string(self, context) -> str: ... class Placeholder(Composable): def __init__(self, name: Incomplete | None = None) -> None: ... @property def name(self) -> str | None: ... def as_string(self, context) -> str: ... NULL: Any DEFAULT: Any patroni-3.2.2/typings/psycopg2/tz.pyi000066400000000000000000000012721455170150700176240ustar00rootroot00000000000000import datetime from _typeshed import Incomplete from typing import Any ZERO: Any class FixedOffsetTimezone(datetime.tzinfo): def __init__(self, offset: Incomplete | None = None, name: Incomplete | None = None) -> None: ... def __new__(cls, offset: Incomplete | None = None, name: Incomplete | None = None): ... def __eq__(self, other): ... def __ne__(self, other): ... def __getinitargs__(self): ... def utcoffset(self, dt): ... def tzname(self, dt): ... def dst(self, dt): ... STDOFFSET: Any DSTOFFSET: Any DSTDIFF: Any class LocalTimezone(datetime.tzinfo): def utcoffset(self, dt): ... def dst(self, dt): ... def tzname(self, dt): ... LOCAL: Any patroni-3.2.2/typings/pysyncobj/000077500000000000000000000000001455170150700167145ustar00rootroot00000000000000patroni-3.2.2/typings/pysyncobj/__init__.pyi000066400000000000000000000002051455170150700211730ustar00rootroot00000000000000from .syncobj import FAIL_REASON, SyncObj, SyncObjConf, replicated __all__ = ['SyncObj', 'SyncObjConf', 'replicated', 'FAIL_REASON'] patroni-3.2.2/typings/pysyncobj/config.pyi000066400000000000000000000004771455170150700207140ustar00rootroot00000000000000from typing import Optional class FAIL_REASON: SUCCESS = ... QUEUE_FULL = ... MISSING_LEADER = ... DISCARDED = ... NOT_LEADER = ... LEADER_CHANGED = ... REQUEST_DENIED = ... 
class SyncObjConf: password: Optional[str] autoTickPeriod: int def __init__(self, **kwargs) -> None: ... patroni-3.2.2/typings/pysyncobj/dns_resolver.pyi000066400000000000000000000003631455170150700221460ustar00rootroot00000000000000from typing import Optional class DnsCachingResolver: def setTimeouts(self, cacheTime: float, failCacheTime: float) -> None: ... def resolve(self, hostname: str) -> Optional[str]: ... def globalDnsResolver() -> DnsCachingResolver: ... patroni-3.2.2/typings/pysyncobj/node.pyi000066400000000000000000000001711455170150700203630ustar00rootroot00000000000000class Node: @property def id(self) -> str: ... class TCPNode(Node): @property def host(self) -> str: ... patroni-3.2.2/typings/pysyncobj/syncobj.pyi000066400000000000000000000021021455170150700211010ustar00rootroot00000000000000from typing import Any, Callable, Collection, List, Optional, Set, Type from .config import FAIL_REASON, SyncObjConf from .node import Node from .transport import Transport __all__ = ['FAIL_REASON', 'SyncObj', 'SyncObjConf', 'replicated'] class SyncObj: def __init__(self, selfNode: Optional[str], otherNodes: Collection[str], conf: SyncObjConf=..., consumers=..., nodeClass=..., transport=..., transportClass: Type[Transport]=...) -> None: ... def destroy(self) -> None: ... def doTick(self, timeToWait: float = 0.0) -> None: ... def isNodeConnected(self, node: Node) -> bool: ... @property def selfNode(self) -> Node: ... @property def otherNodes(self) -> Set[Node]: ... @property def raftLastApplied(self) -> int: ... @property def raftCommitIndex(self) -> int: ... @property def conf(self) -> SyncObjConf: ... def _getLeader(self) -> Optional[Node]: ... def _isLeader(self) -> bool: ... def _onTick(self, timeToWait: float = 0.0) -> None: ... def replicated(*decArgs: Any, **decKwargs: Any) -> Callable[..., Any]: ... 
patroni-3.2.2/typings/pysyncobj/tcp_connection.pyi000066400000000000000000000001301455170150700224360ustar00rootroot00000000000000class CONNECTION_STATE: DISCONNECTED = ... CONNECTING = ... CONNECTED = ... patroni-3.2.2/typings/pysyncobj/transport.pyi000066400000000000000000000010531455170150700214720ustar00rootroot00000000000000from typing import Any, Callable, Collection, Optional from .node import TCPNode from .syncobj import SyncObj from .tcp_connection import CONNECTION_STATE __all__ = ['CONNECTION_STATE', 'TCPTransport'] class Transport: def setOnUtilityMessageCallback(self, message: str, callback: Callable[[Any, Callable[..., Any]], Any]) -> None: ... class TCPTransport(Transport): def __init__(self, syncObj: SyncObj, selfNode: Optional[TCPNode], otherNodes: Collection[TCPNode]) -> None: ... def _connectIfNecessarySingle(self, node: TCPNode) -> bool: ... patroni-3.2.2/typings/pysyncobj/utility.pyi000066400000000000000000000004251455170150700211430ustar00rootroot00000000000000from typing import Any, List, Optional, Union from .node import TCPNode class TcpUtility(Utility): def __init__(self, password: Optional[str] = None, timeout: float=900.0) -> None: ... def executeCommand(self, node: Union[str, TCPNode], command: List[Any]) -> Any: ... 
patroni-3.2.2/typings/urllib3/000077500000000000000000000000001455170150700162505ustar00rootroot00000000000000patroni-3.2.2/typings/urllib3/__init__.pyi000066400000000000000000000005101455170150700205260ustar00rootroot00000000000000from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool from .poolmanager import PoolManager from .response import HTTPResponse from .util.request import make_headers from .util.timeout import Timeout __all__ = ['HTTPResponse', 'HTTPConnectionPool', 'HTTPSConnectionPool', 'PoolManager', 'Timeout', 'make_headers'] patroni-3.2.2/typings/urllib3/_collections.pyi000066400000000000000000000015721455170150700214550ustar00rootroot00000000000000from typing import Any, MutableMapping class HTTPHeaderDict(MutableMapping[str, str]): def __init__(self, headers=None, **kwargs) -> None: ... def __setitem__(self, key, val) -> None: ... def __getitem__(self, key): ... def __delitem__(self, key) -> None: ... def __contains__(self, key): ... def __eq__(self, other): ... def __iter__(self) -> NoReturn: ... def __len__(self) -> int: ... def __ne__(self, other): ... values: Any get: Any update: Any iterkeys: Any itervalues: Any def pop(self, key, default=...): ... def discard(self, key): ... def add(self, key, val): ... def extend(self, *args, **kwargs): ... def getlist(self, key): ... getheaders: Any getallmatchingheaders: Any iget: Any def copy(self): ... def iteritems(self): ... def itermerged(self): ... def items(self): ... patroni-3.2.2/typings/urllib3/connection.pyi000066400000000000000000000001451455170150700211320ustar00rootroot00000000000000from http.client import HTTPConnection as _HTTPConnection class HTTPConnection(_HTTPConnection): ... patroni-3.2.2/typings/urllib3/connectionpool.pyi000066400000000000000000000001211455170150700220160ustar00rootroot00000000000000class HTTPConnectionPool: ... class HTTPSConnectionPool(HTTPConnectionPool): ... 
patroni-3.2.2/typings/urllib3/poolmanager.pyi000066400000000000000000000012671455170150700213050ustar00rootroot00000000000000from typing import Any, Dict, Optional from .response import HTTPResponse class PoolManager: headers: Dict[str, str] connection_pool_kw: Dict[str, Any] def __init__(self, num_pools: int = 10, headers: Optional[Dict[str, str]] = None, **connection_pool_kw: Any) -> None: ... def urlopen(self, method: str, url: str, body: Optional[Any] = None, headers: Optional[Dict[str,str]] = None, encode_multipart: bool = True, multipart_boundary: Optional[str] = None, **kw: Any) -> HTTPResponse: ... def request(self, method: str, url: str, fields: Optional[Any] = None, headers: Optional[Dict[str, str]] = None, **urlopen_kw: Any) -> HTTPResponse: ... def clear(self) -> None: ... patroni-3.2.2/typings/urllib3/response.pyi000066400000000000000000000010211455170150700206230ustar00rootroot00000000000000import io from typing import Any, Iterator, Optional, Union from ._collections import HTTPHeaderDict from .connection import HTTPConnection class HTTPResponse(io.IOBase): headers: HTTPHeaderDict status: int reason: Optional[str] def release_conn(self) -> None: ... @property def data(self) -> Union[bytes, Any]: ... @property def connection(self) -> Optional[HTTPConnection]: ... def read_chunked(self, amt: Optional[int] = None, decode_content: Optional[bool] = None) -> Iterator[bytes]: ... patroni-3.2.2/typings/urllib3/util/000077500000000000000000000000001455170150700172255ustar00rootroot00000000000000patroni-3.2.2/typings/urllib3/util/request.pyi000066400000000000000000000005421455170150700214410ustar00rootroot00000000000000from typing import Optional, Union, Dict, List def make_headers( keep_alive: Optional[bool] = None, accept_encoding: Union[bool, List[str], str, None] = None, user_agent: Optional[str] = None, basic_auth: Optional[str] = None, proxy_basic_auth: Optional[str] = None, disable_cache: Optional[bool] = None, ) -> Dict[str, str]: ... 
patroni-3.2.2/typings/urllib3/util/timeout.pyi000066400000000000000000000003131455170150700214330ustar00rootroot00000000000000from typing import Any, Optional class Timeout: DEFAULT_TIMEOUT: Any def __init__(self, total: Optional[float] = None, connect: Optional[float] = None, read: Optional[float] = None) -> None: ... patroni-3.2.2/typings/ydiff/000077500000000000000000000000001455170150700157755ustar00rootroot00000000000000patroni-3.2.2/typings/ydiff/__init__.pyi000066400000000000000000000002521455170150700202560ustar00rootroot00000000000000import io from typing import Any class PatchStream: def __init__(self, diff_hdl: io.TextIOBase) -> None: ... def markup_to_pager(stream: Any, opts: Any) -> None: ...