pax_global_header00006660000000000000000000000064136135611510014514gustar00rootroot0000000000000052 comment=dc1966e3bcba9fde7f6113e72fdd97395aab6e32 patroni-1.6.4/000077500000000000000000000000001361356115100132005ustar00rootroot00000000000000patroni-1.6.4/.gitignore000066400000000000000000000010551361356115100151710ustar00rootroot00000000000000*.py[cod] # vi(m) swap files: *.sw? # C extensions *.so # Packages .cache/ *.egg *.eggs *.egg-info dist build eggs parts bin var sdist develop-eggs .installed.cfg lib lib64 # Installer logs pip-log.txt # Unit test / coverage reports .coverage .tox nosetests.xml coverage.xml htmlcov junit.xml features/output dummy # Translations *.mo # Mr Developer .mr.developer.cfg .project .pydevproject pgpass scm-source.json # Sphinx-generated documentation docs/build/ docs/source/_static/ docs/source/_templates/ # Pycharm IDE .idea/ #VSCode IDE .vscode/ patroni-1.6.4/.travis.yml000066400000000000000000000137551361356115100153240ustar00rootroot00000000000000sudo: true dist: trusty language: python addons: apt: packages: - expect-dev # for unbuffer env: global: - ETCDVERSION=3.0.17 ZKVERSION=3.4.11 CONSULVERSION=0.7.4 - PYVERSIONS="2.7 3.5 3.6" - EXCLUDE_BEHAVE="3.5" - BOTO_CONFIG=/doesnotexist matrix: include: - python: "3.5" env: TEST_SUITE="python setup.py" - python: "3.6" env: DCS="etcd" TEST_SUITE="behave" - python: "3.6" env: DCS="exhibitor" TEST_SUITE="behave" - python: "3.6" env: DCS="consul" TEST_SUITE="behave" - python: "3.6" env: DCS="kubernetes" TEST_SUITE="behave" branches: only: - master - /^v\d+\.\d+(\.\d+)?$/ cache: directories: - $HOME/mycache before_cache: - | rm -fr $HOME/mycache/python* for pv in $PYVERSIONS; do if [[ $TEST_SUITE != "behave" || $pv != $EXCLUDE_BEHAVE ]]; then fpv=$(basename $(readlink $HOME/virtualenv/python${pv})) mv $HOME/virtualenv/${fpv} $HOME/mycache/${fpv} fi done install: - | set -e if [[ $TEST_SUITE == "behave" ]]; then function get_consul() { CC=~/mycache/consul_${CONSULVERSION} if [[ ! -x $CC ]]; then curl -L https://releases.hashicorp.com/consul/${CONSULVERSION}/consul_${CONSULVERSION}_linux_amd64.zip \ | gunzip > $CC [[ ${PIPESTATUS[0]} == 0 ]] || return 1 chmod +x $CC fi ln -s $CC consul } function get_etcd() { EC=~/mycache/etcd_${ETCDVERSION} if [[ ! -x $EC ]]; then curl -L https://github.com/coreos/etcd/releases/download/v${ETCDVERSION}/etcd-v${ETCDVERSION}-linux-amd64.tar.gz \ | tar xz -C . --strip=1 --wildcards --no-anchored etcd [[ ${PIPESTATUS[0]} == 0 ]] || return 1 mv etcd $EC fi ln -s $EC etcd } function get_kubernetes() { wget -O localkube "https://storage.googleapis.com/minikube/k8sReleases/v1.7.0/localkube-linux-amd64" chmod +x localkube sudo nohup ./localkube --logtostderr=true --enable-dns=false > localkube.log 2>&1 & echo "Waiting for localkube to start..." if ! timeout 120 sh -c "while ! 
curl -ks http://127.0.0.1:8080/ >/dev/null; do sleep 1; done"; then sudo cat localkube.log echo "localkube did not start" exit 1 fi echo "Check certificate permissions" sudo chmod 644 /var/lib/localkube/certs/* sudo ls -altr /var/lib/localkube/certs/ echo "Set up .kube/config" mkdir ~/.kube echo -e "apiVersion: v1\nclusters:\n- cluster:\n certificate-authority: /var/lib/localkube/certs/ca.crt\n server: https://127.0.0.1:8443\n name: local\ncontexts:\n- context:\n cluster: local\n user: myself\n name: local\ncurrent-context: local\nkind: Config\npreferences: {}\nusers:\n- name: myself\n user:\n client-certificate: /var/lib/localkube/certs/apiserver.crt\n client-key: /var/lib/localkube/certs/apiserver.key\n" > ~/.kube/config } function get_exhibitor() { ZC=~/mycache/zookeeper-${ZKVERSION} if [[ ! -d $ZC ]]; then curl -L http://www.apache.org/dist/zookeeper/zookeeper-${ZKVERSION}/zookeeper-${ZKVERSION}.tar.gz | tar xz [[ ${PIPESTATUS[0]} == 0 ]] || return 1 mv zookeeper-${ZKVERSION}/conf/zoo_sample.cfg zookeeper-${ZKVERSION}/conf/zoo.cfg mv zookeeper-${ZKVERSION} $ZC fi $ZC/bin/zkServer.sh start # following lines are 'emulating' exhibitor REST API while true; do echo -e 'HTTP/1.0 200 OK\nContent-Type: application/json\n\n{"servers":["127.0.0.1"],"port":2181}' \ | nc -l 8181 &> /dev/null done& } attempt_num=1 until get_${DCS}; do [[ $attempt_num -ge 3 ]] && exit 1 echo "Attempt $attempt_num failed! Trying again in $attempt_num seconds..." sleep $(( attempt_num++ )) done fi for pv in $PYVERSIONS; do if [[ $TEST_SUITE != "behave" || $pv != $EXCLUDE_BEHAVE ]]; then fpv=$(basename $(readlink $HOME/virtualenv/python$pv)) if [[ -d ~/mycache/${fpv} ]]; then mv ~/virtualenv/${fpv} ~/virtualenv/${fpv}.bckp mv ~/mycache/${fpv} ~/virtualenv/${fpv} fi source ~/virtualenv/python${pv}/bin/activate # explicitly install all needed python modules to cache them for p in '-r requirements.txt' 'psycopg2-binary behave codacy-coverage coverage coveralls flake8 mock pytest-cov pytest setuptools'; do pip install $p --upgrade done fi done script: - | for pv in $PYVERSIONS; do if [[ $TEST_SUITE == "behave" && $pv == $EXCLUDE_BEHAVE ]]; then continue fi source ~/virtualenv/python${pv}/bin/activate if [[ $TEST_SUITE != "behave" ]]; then echo Running unit tests using python${pv} unbuffer $TEST_SUITE test $TEST_SUITE flake8 elif [[ $pv != $EXCLUDE_BEHAVE ]]; then echo Running acceptance tests using python${pv} if ! PATH=.:/usr/lib/postgresql/9.6/bin:$PATH unbuffer $TEST_SUITE; then # output all log files when tests are failing grep . 
features/output/*_failed/*postgres?.* exit 1 fi fi done set +e after_success: # before_cache is executed earlier than after_success, so we need to restore one of virtualenv directories - fpv=$(basename $(readlink $HOME/virtualenv/python3.6)) && mv $HOME/mycache/${fpv} $HOME/virtualenv/${fpv} - coveralls - if [[ $TEST_SUITE != "behave" ]]; then python-codacy-coverage -r coverage.xml; fi - if [[ $DCS == "exhibitor" ]]; then ~/mycache/zookeeper-${ZKVERSION}/bin/zkServer.sh stop; fi - sudo kill $(jobs -p) patroni-1.6.4/Dockerfile000066400000000000000000000165371361356115100152060ustar00rootroot00000000000000## This Dockerfile is meant to aid in the building and debugging patroni whilst developing on your local machine ## It has all the necessary components to play/debug with a single node appliance, running etcd ARG PG_MAJOR=10 ARG COMPRESS=false ARG PGHOME=/home/postgres ARG PGDATA=$PGHOME/data ARG LC_ALL=C.UTF-8 ARG LANG=C.UTF-8 FROM postgres:$PG_MAJOR as builder ARG PGHOME ARG PGDATA ARG LC_ALL ARG LANG ENV ETCDVERSION=2.3.8 CONFDVERSION=0.16.0 RUN set -ex \ && export DEBIAN_FRONTEND=noninteractive \ && echo 'APT::Install-Recommends "0";\nAPT::Install-Suggests "0";' > /etc/apt/apt.conf.d/01norecommend \ && apt-get update -y \ # postgres:10 is based on debian, which has the patroni package. We will install all required dependencies && apt-cache depends patroni | sed -n -e 's/.*Depends: \(python3-.\+\)$/\1/p' \ | grep -Ev '^python3-(sphinx|etcd|consul|kazoo|kubernetes)' \ | xargs apt-get install -y vim curl less jq locales haproxy sudo \ python3-etcd python3-kazoo python3-pip busybox \ net-tools iputils-ping --fix-missing \ && pip3 install dumb-init \ \ # Cleanup all locales but en_US.UTF-8 && find /usr/share/i18n/charmaps/ -type f ! -name UTF-8.gz -delete \ && find /usr/share/i18n/locales/ -type f ! -name en_US ! -name en_GB ! -name i18n ! -name iso14651_t1 ! -name iso14651_t1_common ! 
-name 'translit_*' -delete \ && echo 'en_US.UTF-8 UTF-8' > /usr/share/i18n/SUPPORTED \ \ # Make sure we have a en_US.UTF-8 locale available && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ \ # haproxy dummy config && echo 'global\n stats socket /run/haproxy/admin.sock mode 660 level admin' > /etc/haproxy/haproxy.cfg \ \ # vim config && echo 'syntax on\nfiletype plugin indent on\nset mouse-=a\nautocmd FileType yaml setlocal ts=2 sts=2 sw=2 expandtab' > /etc/vim/vimrc.local \ \ # Prepare postgres/patroni/haproxy environment && mkdir -p $PGHOME/.config/patroni /patroni /run/haproxy \ && ln -s ../../postgres0.yml $PGHOME/.config/patroni/patronictl.yaml \ && ln -s /patronictl.py /usr/local/bin/patronictl \ && sed -i "s|/var/lib/postgresql.*|$PGHOME:/bin/bash|" /etc/passwd \ && chown -R postgres:postgres /var/log \ \ # Download etcd && curl -sL https://github.com/coreos/etcd/releases/download/v${ETCDVERSION}/etcd-v${ETCDVERSION}-linux-amd64.tar.gz \ | tar xz -C /usr/local/bin --strip=1 --wildcards --no-anchored etcd etcdctl \ \ # Download confd && curl -sL https://github.com/kelseyhightower/confd/releases/download/v${CONFDVERSION}/confd-${CONFDVERSION}-linux-amd64 \ > /usr/local/bin/confd && chmod +x /usr/local/bin/confd \ \ # Clean up all useless packages and some files && apt-get purge -y --allow-remove-essential python3-pip gzip bzip2 util-linux e2fsprogs \ libmagic1 bsdmainutils login ncurses-bin libmagic-mgc e2fslibs bsdutils \ exim4-config gnupg-agent dirmngr libpython2.7-stdlib libpython2.7-minimal \ && apt-get autoremove -y \ && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* \ /root/.cache \ /var/cache/debconf/* \ /etc/rc?.d \ /etc/systemd \ /docker-entrypoint* \ /sbin/pam* \ /sbin/swap* \ /sbin/unix* \ /usr/local/bin/gosu \ /usr/sbin/[acgipr]* \ /usr/sbin/*user* \ /usr/share/doc* \ /usr/share/man \ /usr/share/info \ /usr/share/i18n/locales/translit_hangul \ /usr/share/locale/?? \ /usr/share/locale/??_?? \ /usr/share/postgresql/*/man \ /usr/share/postgresql-common/pg_wrapper \ /usr/share/vim/vim80/doc \ /usr/share/vim/vim80/lang \ /usr/share/vim/vim80/tutor \ # /var/lib/dpkg/info/* \ && find /usr/bin -xtype l -delete \ && find /var/log -type f -exec truncate --size 0 {} \; \ && find /usr/lib/python3/dist-packages -name '*test*' | xargs rm -fr \ && find /lib/x86_64-linux-gnu/security -type f ! -name pam_env.so ! -name pam_permit.so ! -name pam_unix.so -delete # perform compression if it is necessary ARG COMPRESS RUN if [ "$COMPRESS" = "true" ]; then \ set -ex \ # Allow certain sudo commands from postgres && echo 'postgres ALL=(ALL) NOPASSWD: /bin/tar xpJf /a.tar.xz -C /, /bin/rm /a.tar.xz, /bin/ln -snf dash /bin/sh' >> /etc/sudoers \ && ln -snf busybox /bin/sh \ && files="/bin/sh /usr/bin/sudo /usr/lib/sudo/sudoers.so /lib/x86_64-linux-gnu/security/pam_*.so" \ && libs="$(ldd $files | awk '{print $3;}' | grep '^/' | sort -u) /lib/x86_64-linux-gnu/ld-linux-x86-64.so.* /lib/x86_64-linux-gnu/libnsl.so.* /lib/x86_64-linux-gnu/libnss_compat.so.*" \ && (echo /var/run $files $libs | tr ' ' '\n' && realpath $files $libs) | sort -u | sed 's/^\///' > /exclude \ && find /etc/alternatives -xtype l -delete \ && save_dirs="usr lib var bin sbin etc/ssl etc/init.d etc/alternatives etc/apt" \ && XZ_OPT=-e9v tar -X /exclude -cpJf a.tar.xz $save_dirs \ # we call "cat /exclude" to avoid including files from the $save_dirs that are also among # the exceptions listed in the /exclude, as "uniq -u" eliminates all non-unique lines. 
# By calling "cat /exclude" a second time we guarantee that there will be at least two lines # for each exception and therefore they will be excluded from the output passed to 'rm'. && /bin/busybox sh -c "(find $save_dirs -not -type d && cat /exclude /exclude && echo exclude) | sort | uniq -u | xargs /bin/busybox rm" \ && /bin/busybox --install -s \ && /bin/busybox sh -c "find $save_dirs -type d -depth -exec rmdir -p {} \; 2> /dev/null"; \ fi FROM scratch COPY --from=builder / / LABEL maintainer="Alexander Kukushkin " ARG PG_MAJOR ARG COMPRESS ARG PGHOME ARG PGDATA ARG LC_ALL ARG LANG ARG PGBIN=/usr/lib/postgresql/$PG_MAJOR/bin ENV LC_ALL=$LC_ALL LANG=$LANG EDITOR=/usr/bin/editor ENV PGDATA=$PGDATA PATH=$PATH:$PGBIN COPY patroni /patroni/ COPY extras/confd/conf.d/haproxy.toml /etc/confd/conf.d/ COPY extras/confd/templates/haproxy.tmpl /etc/confd/templates/ COPY patroni*.py docker/entrypoint.sh / COPY postgres?.yml $PGHOME/ WORKDIR $PGHOME RUN sed -i 's/env python/&3/' /patroni*.py \ # "fix" patroni configs && sed -i 's/^\( connect_address:\| - host\)/#&/' postgres?.yml \ && sed -i 's/^ listen: 127.0.0.1/ listen: 0.0.0.0/' postgres?.yml \ && sed -i "s|^\( data_dir: \).*|\1$PGDATA|" postgres?.yml \ && sed -i "s|^#\( bin_dir: \).*|\1$PGBIN|" postgres?.yml \ && sed -i 's/^ - encoding: UTF8/ - locale: en_US.UTF-8\n&/' postgres?.yml \ && sed -i 's/^\(scope\|name\|etcd\| host\| authentication\| pg_hba\| parameters\):/#&/' postgres?.yml \ && sed -i 's/^ \(replication\|superuser\|rewind\|unix_socket_directories\|\(\( \)\{0,1\}\(username\|password\)\)\):/#&/' postgres?.yml \ && sed -i 's/^ parameters:/ pg_hba:\n - local all all trust\n - host replication all all md5\n - host all all all md5\n&\n max_connections: 100/' postgres?.yml \ && if [ "$COMPRESS" = "true" ]; then chmod u+s /usr/bin/sudo; fi \ && chmod +s /bin/ping \ && chown -R postgres:postgres $PGHOME /run /etc/haproxy USER postgres ENTRYPOINT ["/bin/sh", "/entrypoint.sh"] patroni-1.6.4/LICENSE000066400000000000000000000020761361356115100142120ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2015 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
patroni-1.6.4/MAINTAINERS000066400000000000000000000002231361356115100146720ustar00rootroot00000000000000Alexander Kukushkin Feike Steenbergen Oleksii Kliukin patroni-1.6.4/MANIFEST.in000066400000000000000000000001031361356115100147300ustar00rootroot00000000000000include requirements* include *.rst recursive-include patroni *.py patroni-1.6.4/README.rst000066400000000000000000000173631361356115100147010ustar00rootroot00000000000000|Build Status| |Coverage Status| Patroni: A Template for PostgreSQL HA with ZooKeeper, etcd or Consul -------------------------------------------------------------------- You can find a version of this documentation that is searchable and also easier to navigate at `patroni.readthedocs.io `__. There are many ways to run high availability with PostgreSQL; for a list, see the `PostgreSQL Documentation `__. Patroni is a template for you to create your own customized, high-availability solution using Python and - for maximum accessibility - a distributed configuration store like `ZooKeeper `__, `etcd `__, `Consul `__ or `Kubernetes `__. Database engineers, DBAs, DevOps engineers, and SREs who are looking to quickly deploy HA PostgreSQL in the datacenter-or anywhere else-will hopefully find it useful. We call Patroni a "template" because it is far from being a one-size-fits-all or plug-and-play replication system. It will have its own caveats. Use wisely. **Note to Kubernetes users**: Patroni can run natively on top of Kubernetes. Take a look at the `Kubernetes `__ chapter of the Patroni documentation. .. contents:: :local: :depth: 1 :backlinks: none ================= How Patroni Works ================= Patroni originated as a fork of `Governor `__, the project from Compose. It includes plenty of new features. For an example of a Docker-based deployment with Patroni, see `Spilo `__, currently in use at Zalando. For additional background info, see: * `Elephants on Automatic: HA Clustered PostgreSQL with Helm `_, talk by Josh Berkus and Oleksii Kliukin at KubeCon Berlin 2017 * `PostgreSQL HA with Kubernetes and Patroni `__, talk by Josh Berkus at KubeCon 2016 (video) * `Feb. 2016 Zalando Tech blog post `__ ================== Development Status ================== Patroni is in active development and accepts contributions. See our `Contributing `__ section below for more details. We report new releases information `here `__. ========= Community ========= There are two places to connect with the Patroni community: `on github `__, via Issues and PRs, and on channel #patroni in the `PostgreSQL Slack `__. If you're using Patroni, or just interested, please join us. =================================== Technical Requirements/Installation =================================== **Pre-requirements for Mac OS** To install requirements on a Mac, run the following: :: brew install postgresql etcd haproxy libyaml python **Psycopg2** Starting from `psycopg2-2.8 `__ the binary version of psycopg2 will no longer be installed by default. Installing it from the source code requires C compiler and postgres+python dev packages. Since in the python world it is not possible to specify dependency as ``psycopg2 OR psycopg2-binary`` you will have to decide how to install it. There are a few options available: 1. 
Use the package manager from your distro

::

    sudo apt-get install python-psycopg2   # install python2 psycopg2 module on Debian/Ubuntu
    sudo apt-get install python3-psycopg2  # install python3 psycopg2 module on Debian/Ubuntu
    sudo yum install python-psycopg2       # install python2 psycopg2 on RedHat/Fedora/CentOS

2. Install psycopg2 from the binary package

::

    pip install psycopg2-binary

3. Install psycopg2 from source

::

    pip install psycopg2>=2.5.4

**General installation for pip**

Patroni can be installed with pip:

::

    pip install patroni[dependencies]

where dependencies can be either empty, or consist of one or more of the following:

etcd
    `python-etcd` module in order to use Etcd as DCS
consul
    `python-consul` module in order to use Consul as DCS
zookeeper
    `kazoo` module in order to use Zookeeper as DCS
exhibitor
    `kazoo` module in order to use Exhibitor as DCS (same dependencies as for Zookeeper)
kubernetes
    `kubernetes` module in order to use Kubernetes as DCS in Patroni
aws
    `boto` in order to use AWS callbacks

For example, the command in order to install Patroni together with dependencies for Etcd as a DCS and AWS callbacks is:

::

    pip install patroni[etcd,aws]

Note that external tools to call in the replica creation or custom bootstrap scripts (i.e. WAL-E) should be installed independently of Patroni.

=======================
Running and Configuring
=======================

To get started, do the following from different terminals:

::

    > etcd --data-dir=data/etcd
    > ./patroni.py postgres0.yml
    > ./patroni.py postgres1.yml

You will then see a high-availability cluster start up. Test different settings in the YAML files to see how the cluster's behavior changes. Kill some of the components to see how the system behaves. Add more ``postgres*.yml`` files to create an even larger cluster.

Patroni provides an `HAProxy `__ configuration, which will give your application a single endpoint for connecting to the cluster's leader. To configure, run:

::

    > haproxy -f haproxy.cfg

::

    > psql --host 127.0.0.1 --port 5000 postgres

==================
YAML Configuration
==================

Go `here `__ for comprehensive information about settings for etcd, consul, and ZooKeeper. And for an example, see `postgres0.yml `__.

=========================
Environment Configuration
=========================

Go `here `__ for comprehensive information about configuring (overriding) settings via environment variables.

===================
Replication Choices
===================

Patroni uses Postgres' streaming replication, which is asynchronous by default. Patroni's asynchronous replication configuration allows for ``maximum_lag_on_failover`` settings. This setting ensures failover will not occur if a follower is more than a certain number of bytes behind the leader. This setting should be increased or decreased based on business requirements. It's also possible to use synchronous replication for better durability guarantees. See `replication modes documentation `__ for details.

======================================
Applications Should Not Use Superusers
======================================

When connecting from an application, always use a non-superuser. Patroni requires access to the database to function properly. By using a superuser from an application, you can potentially use the entire connection pool, including the connections reserved for superusers, with the ``superuser_reserved_connections`` setting. If Patroni cannot access the Primary because the connection pool is full, behavior will be undesirable.
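As an illustration of that advice, here is a minimal sketch in Python using the ``psycopg2`` module discussed above. The ``app_user`` role, its password and the HAProxy endpoint on port 5000 are assumptions borrowed from the examples in this README, not names required by Patroni:

::

    # Sketch: the application connects through the HAProxy endpoint with a
    # regular role instead of the Patroni superuser. "app_user"/"secret"
    # are placeholder credentials for illustration only.
    import psycopg2

    conn = psycopg2.connect(host='127.0.0.1', port=5000, dbname='postgres',
                            user='app_user', password='secret')
    with conn.cursor() as cur:
        cur.execute('SELECT pg_is_in_recovery()')
        print(cur.fetchone()[0])  # False: HAProxy routed us to the leader
    conn.close()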
.. |Build Status| image:: https://travis-ci.org/zalando/patroni.svg?branch=master
   :target: https://travis-ci.org/zalando/patroni
.. |Coverage Status| image:: https://coveralls.io/repos/zalando/patroni/badge.svg?branch=master
   :target: https://coveralls.io/r/zalando/patroni?branch=master

patroni-1.6.4/TODO.md

Failover
========

- When determining who should become master, include the minor version of PostgreSQL in the decision.

Configuration
==============

- Provide a way to change pg_hba.conf of a running cluster on the Patroni level, without changing individual nodes.
- Provide hooks to store and retrieve cluster-wide passwords without exposing them in a plain-text form to unauthorized users.

Documentation
==============

- Document how to run cascading replication and possibly initialize the cluster without access to the master node.

patroni-1.6.4/docker-compose.yml

# docker compose file for running a 3-node PostgreSQL cluster
# with 3-node etcd cluster as the DCS and one haproxy node
version: "2"

networks:
    demo:

services:
    etcd1:
        image: patroni
        networks: [ demo ]
        env_file: docker/etcd.env
        container_name: demo-etcd1
        hostname: etcd1
        command: etcd -name etcd1 -initial-advertise-peer-urls http://etcd1:2380

    etcd2:
        image: patroni
        networks: [ demo ]
        env_file: docker/etcd.env
        container_name: demo-etcd2
        hostname: etcd2
        command: etcd -name etcd2 -initial-advertise-peer-urls http://etcd2:2380

    etcd3:
        image: patroni
        networks: [ demo ]
        env_file: docker/etcd.env
        container_name: demo-etcd3
        hostname: etcd3
        command: etcd -name etcd3 -initial-advertise-peer-urls http://etcd3:2380

    patroni1:
        image: patroni
        networks: [ demo ]
        env_file: docker/patroni.env
        hostname: patroni1
        container_name: demo-patroni1
        environment:
            PATRONI_NAME: patroni1

    patroni2:
        image: patroni
        networks: [ demo ]
        env_file: docker/patroni.env
        hostname: patroni2
        container_name: demo-patroni2
        environment:
            PATRONI_NAME: patroni2

    patroni3:
        image: patroni
        networks: [ demo ]
        env_file: docker/patroni.env
        hostname: patroni3
        container_name: demo-patroni3
        environment:
            PATRONI_NAME: patroni3

    haproxy:
        image: patroni
        networks: [ demo ]
        env_file: docker/patroni.env
        hostname: haproxy
        container_name: demo-haproxy
        ports:
            - "5000:5000"
            - "5001:5001"
        command: haproxy

patroni-1.6.4/docker/README.md

# Patroni Dockerfile

You can run Patroni in a docker container using this Dockerfile.

This Dockerfile is meant to aid development of Patroni and quick testing of features. It is not a production-worthy Dockerfile.

    docker build -t patroni .

# Examples

## Standalone Patroni

    docker run -d patroni

## Three-node Patroni cluster with three-node etcd cluster and one haproxy container using docker-compose

Example session:

    $ docker-compose up -d
    Creating demo-haproxy ...
    Creating demo-patroni2 ...
    Creating demo-patroni1 ...
    Creating demo-patroni3 ...
    Creating demo-etcd2 ...
    Creating demo-etcd1 ...
    Creating demo-etcd3 ...
    Creating demo-haproxy
    Creating demo-patroni2
    Creating demo-patroni1
    Creating demo-patroni3
    Creating demo-etcd1
    Creating demo-etcd2
    Creating demo-etcd2 ...
done $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 5b7a90b4cfbf patroni "/bin/sh /entrypoint…" 29 seconds ago Up 27 seconds demo-etcd2 e30eea5222f2 patroni "/bin/sh /entrypoint…" 29 seconds ago Up 27 seconds demo-etcd1 83bcf3cb208f patroni "/bin/sh /entrypoint…" 29 seconds ago Up 27 seconds demo-etcd3 922532c56e7d patroni "/bin/sh /entrypoint…" 29 seconds ago Up 28 seconds demo-patroni3 14f875e445f3 patroni "/bin/sh /entrypoint…" 29 seconds ago Up 28 seconds demo-patroni2 110d1073b383 patroni "/bin/sh /entrypoint…" 29 seconds ago Up 28 seconds demo-patroni1 5af5e6e36028 patroni "/bin/sh /entrypoint…" 29 seconds ago Up 28 seconds 0.0.0.0:5000-5001->5000-5001/tcp demo-haproxy $ docker logs demo-patroni1 2019-02-20 08:19:32,714 INFO: Failed to import patroni.dcs.consul 2019-02-20 08:19:32,737 INFO: Selected new etcd server http://etcd3:2379 2019-02-20 08:19:35,140 INFO: Lock owner: None; I am patroni1 2019-02-20 08:19:35,174 INFO: trying to bootstrap a new cluster ... 2019-02-20 08:19:39,310 INFO: postmaster pid=37 2019-02-20 08:19:39.314 UTC [37] LOG: listening on IPv4 address "0.0.0.0", port 5432 2019-02-20 08:19:39.321 UTC [37] LOG: listening on Unix socket "/var/run/postgresql/.s.PGSQL.5432" 2019-02-20 08:19:39.353 UTC [39] LOG: database system was shut down at 2019-02-20 08:19:36 UTC 2019-02-20 08:19:39.354 UTC [40] FATAL: the database system is starting up localhost:5432 - rejecting connections 2019-02-20 08:19:39.369 UTC [37] LOG: database system is ready to accept connections localhost:5432 - accepting connections 2019-02-20 08:19:39,383 INFO: establishing a new patroni connection to the postgres cluster 2019-02-20 08:19:39,408 INFO: running post_bootstrap 2019-02-20 08:19:39,432 WARNING: Could not activate Linux watchdog device: "Can't open watchdog device: [Errno 2] No such file or directory: '/dev/watchdog'" 2019-02-20 08:19:39,515 INFO: initialized a new cluster 2019-02-20 08:19:49,424 INFO: Lock owner: patroni1; I am patroni1 2019-02-20 08:19:49,447 INFO: Lock owner: patroni1; I am patroni1 2019-02-20 08:19:49,480 INFO: no action. i am the leader with the lock 2019-02-20 08:19:59,422 INFO: Lock owner: patroni1; I am patroni1 $ docker exec -ti demo-patroni1 bash postgres@patroni1:~$ patronictl list +---------+----------+------------+--------+---------+----+-----------+ | Cluster | Member | Host | Role | State | TL | Lag in MB | +---------+----------+------------+--------+---------+----+-----------+ | demo | patroni1 | 172.22.0.3 | Leader | running | 1 | 0 | | demo | patroni2 | 172.22.0.7 | | running | 1 | 0 | | demo | patroni3 | 172.22.0.4 | | running | 1 | 0 | +---------+----------+------------+--------+---------+----+-----------+ postgres@patroni1:~$ etcdctl ls --recursive --sort -p /service/demo /service/demo/config /service/demo/initialize /service/demo/leader /service/demo/members/ /service/demo/members/patroni1 /service/demo/members/patroni2 /service/demo/members/patroni3 /service/demo/optime/ /service/demo/optime/leader postgres@patroni1:~$ etcdctl member list 1bab629f01fa9065: name=etcd3 peerURLs=http://etcd3:2380 clientURLs=http://etcd3:2379 isLeader=false 8ecb6af518d241cc: name=etcd2 peerURLs=http://etcd2:2380 clientURLs=http://etcd2:2379 isLeader=true b2e169fcb8a34028: name=etcd1 peerURLs=http://etcd1:2380 clientURLs=http://etcd1:2379 isLeader=false postgres@patroni1:~$ exit $ psql -h localhost -p 5000 -U postgres -W Password: postgres psql (11.2 (Ubuntu 11.2-1.pgdg18.04+1), server 10.7 (Debian 10.7-1.pgdg90+1)) Type "help" for help. 
localhost/postgres=# select pg_is_in_recovery(); pg_is_in_recovery ─────────────────── f (1 row) localhost/postgres=# \q $ psql -h localhost -p 5001 -U postgres -W Password: postgres psql (11.2 (Ubuntu 11.2-1.pgdg18.04+1), server 10.7 (Debian 10.7-1.pgdg90+1)) Type "help" for help. localhost/postgres=# select pg_is_in_recovery(); pg_is_in_recovery ─────────────────── t (1 row) patroni-1.6.4/docker/entrypoint.sh000077500000000000000000000045671361356115100172350ustar00rootroot00000000000000#!/bin/sh if [ -f /a.tar.xz ]; then echo "decompressing image..." sudo tar xpJf /a.tar.xz -C / > /dev/null 2>&1 sudo rm /a.tar.xz sudo ln -snf dash /bin/sh fi readonly PATRONI_SCOPE=${PATRONI_SCOPE:-batman} PATRONI_NAMESPACE=${PATRONI_NAMESPACE:-/service} readonly PATRONI_NAMESPACE=${PATRONI_NAMESPACE%/} readonly DOCKER_IP=$(hostname --ip-address) case "$1" in haproxy) haproxy -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid -D CONFD="confd -prefix=$PATRONI_NAMESPACE/$PATRONI_SCOPE -interval=10 -backend" if [ ! -z "$PATRONI_ZOOKEEPER_HOSTS" ]; then while ! /usr/share/zookeeper/bin/zkCli.sh -server $PATRONI_ZOOKEEPER_HOSTS ls /; do sleep 1 done exec dumb-init $CONFD zookeeper -node $PATRONI_ZOOKEEPER_HOSTS else while ! etcdctl cluster-health 2> /dev/null; do sleep 1 done exec dumb-init $CONFD etcd -node $(echo $ETCDCTL_ENDPOINTS | sed 's/,/ -node /g') fi ;; etcd) exec "$@" -advertise-client-urls http://$DOCKER_IP:2379 ;; zookeeper) exec /usr/share/zookeeper/bin/zkServer.sh start-foreground ;; esac ## We start an etcd if [ -z "$PATRONI_ETCD_HOSTS" ] && [ -z "$PATRONI_ZOOKEEPER_HOSTS" ]; then export PATRONI_ETCD_URL="http://127.0.0.1:2379" etcd --data-dir /tmp/etcd.data -advertise-client-urls=$PATRONI_ETCD_URL -listen-client-urls=http://0.0.0.0:2379 > /var/log/etcd.log 2> /var/log/etcd.err & fi export PATRONI_SCOPE export PATRONI_NAMESPACE export PATRONI_NAME="${PATRONI_NAME:-$(hostname)}" export PATRONI_RESTAPI_CONNECT_ADDRESS="$DOCKER_IP:8008" export PATRONI_RESTAPI_LISTEN="0.0.0.0:8008" export PATRONI_admin_PASSWORD="${PATRONI_admin_PASSWORD:-admin}" export PATRONI_admin_OPTIONS="${PATRONI_admin_OPTIONS:-createdb, createrole}" export PATRONI_POSTGRESQL_CONNECT_ADDRESS="$DOCKER_IP:5432" export PATRONI_POSTGRESQL_LISTEN="0.0.0.0:5432" export PATRONI_POSTGRESQL_DATA_DIR="${PATRONI_POSTGRESQL_DATA_DIR:-$PGDATA}" export PATRONI_REPLICATION_USERNAME="${PATRONI_REPLICATION_USERNAME:-replicator}" export PATRONI_REPLICATION_PASSWORD="${PATRONI_REPLICATION_PASSWORD:-replicate}" export PATRONI_SUPERUSER_USERNAME="${PATRONI_SUPERUSER_USERNAME:-postgres}" export PATRONI_SUPERUSER_PASSWORD="${PATRONI_SUPERUSER_PASSWORD:-postgres}" exec python3 /patroni.py postgres0.yml patroni-1.6.4/docker/etcd.env000066400000000000000000000003661361356115100161050ustar00rootroot00000000000000ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380 ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379 ETCD_INITIAL_CLUSTER=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380 ETCD_INITIAL_CLUSTER_STATE=new ETCD_INITIAL_CLUSTER_TOKEN=tutorial patroni-1.6.4/docker/patroni.env000066400000000000000000000007011361356115100166330ustar00rootroot00000000000000PATRONI_SCOPE=demo PATRONI_ETCD_HOSTS='etcd1:2379','etcd2:2379','etcd3:2379' PATRONI_RESTAPI_USERNAME=admin PATRONI_RESTAPI_PASSWORD=admin PATRONI_SUPERUSER_USERNAME=postgres PATRONI_SUPERUSER_PASSWORD=postgres PATRONI_REPLICATION_USERNAME=replicator PATRONI_REPLICATION_PASSWORD=replicate PATRONI_admin_PASSWORD=admin PATRONI_admin_OPTIONS=createdb,createrole # for etcdctl 
ETCDCTL_ENDPOINTS=http://etcd1:2379,http://etcd2:2379,http://etcd3:2379 patroni-1.6.4/docs/000077500000000000000000000000001361356115100141305ustar00rootroot00000000000000patroni-1.6.4/docs/CONTRIBUTING.rst000066400000000000000000000016661361356115100166020ustar00rootroot00000000000000.. _contributing: Contributing guidelines ======================= Wanna contribute to Patroni? Yay - here is how! Chatting -------- Just want to chat with other Patroni users? Looking for interactive troubleshooting help? Join us on channel #patroni in the `PostgreSQL Slack `__. Reporting issues ---------------- If you have a question about patroni or have a problem using it, please read the :ref:`README ` before filing an issue. Also double check with the current issues on our `Issues Tracker `__. Contributing a pull request --------------------------- 1) Submit a comment to the relevant issue or create a new issue describing your proposed change. 2) Do a fork, develop and test your code changes. 3) Include documentation 4) Submit a pull request. You'll get feedback about your pull request as soon as possible. Happy Patroni hacking ;-) patroni-1.6.4/docs/ENVIRONMENT.rst000066400000000000000000000422731361356115100164760ustar00rootroot00000000000000.. _environment: Environment Configuration Settings ================================== It is possible to override some of the configuration parameters defined in the Patroni configuration file using the system environment variables. This document lists all environment variables handled by Patroni. The values set via those variables always take precedence over the ones set in the Patroni configuration file. Global/Universal ---------------- - **PATRONI\_CONFIGURATION**: it is possible to set the entire configuration for the Patroni via ``PATRONI_CONFIGURATION`` environment variable. In this case any other environment variables will not be considered! - **PATRONI\_NAME**: name of the node where the current instance of Patroni is running. Must be unique for the cluster. - **PATRONI\_NAMESPACE**: path within the configuration store where Patroni will keep information about the cluster. Default value: "/service" - **PATRONI\_SCOPE**: cluster name Log --- - **PATRONI\_LOG\_LEVEL**: sets the general logging level. Default value is **INFO** (see `the docs for Python logging `_) - **PATRONI\_LOG\_TRACEBACK\_LEVEL**: sets the level where tracebacks will be visible. Default value is **ERROR**. Set it to **DEBUG** if you want to see tracebacks only if you enable **PATRONI\_LOG\_LEVEL=DEBUG**. - **PATRONI\_LOG\_FORMAT**: sets the log formatting string. Default value is **%(asctime)s %(levelname)s: %(message)s** (see `the LogRecord attributes `_) - **PATRONI\_LOG\_DATEFORMAT**: sets the datetime formatting string. (see the `formatTime() documentation `_) - **PATRONI\_LOG\_MAX\_QUEUE\_SIZE**: Patroni is using two-step logging. Log records are written into the in-memory queue and there is a separate thread which pulls them from the queue and writes to stderr or file. The maximum size of the internal queue is limited by default by **1000** records, which is enough to keep logs for the past 1h20m. - **PATRONI\_LOG\_DIR**: Directory to write application logs to. The directory must exist and be writable by the user executing Patroni. If you set this env variable, the application will retain 4 25MB logs by default. You can tune those retention values with `PATRONI_LOG_FILE_NUM` and `PATRONI_LOG_FILE_SIZE` (see below). 
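  For example, a minimal wrapper script (a sketch; the path is an assumption, the retention values restate the defaults above) could enable file logging before starting Patroni::

      # Sketch: set the file-logging variables, then start Patroni.
      # The log directory must already exist and be writable.
      import os
      import subprocess

      os.environ['PATRONI_LOG_DIR'] = '/var/log/patroni'            # assumed path
      os.environ['PATRONI_LOG_FILE_NUM'] = '4'                      # default: 4 files
      os.environ['PATRONI_LOG_FILE_SIZE'] = str(25 * 1024 * 1024)   # default: 25MB
      subprocess.check_call(['patroni', 'postgres0.yml'])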
- **PATRONI\_LOG\_FILE\_NUM**: The number of application logs to retain. - **PATRONI\_LOG\_FILE\_SIZE**: Size of patroni.log file (in bytes) that triggers a log rolling. - **PATRONI\_LOG\_LOGGERS**: Redefine logging level per python module. Example ``PATRONI_LOG_LOGGERS="{patroni.postmaster: WARNING, urllib3: DEBUG}"`` Bootstrap configuration ----------------------- It is possible to create new database users right after the successful initialization of a new cluster. This process is defined by the following variables: - **PATRONI\_\_PASSWORD=''** - **PATRONI\_\_OPTIONS='list,of,options'** Example: defining ``PATRONI_admin_PASSWORD=strongpasswd`` and ``PATRONI_admin_OPTIONS='createrole,createdb'`` will cause creation of the user **admin** with the password **strongpasswd** that is allowed to create other users and databases. Consul ------ - **PATRONI\_CONSUL\_HOST**: the host:port for the Consul endpoint. - **PATRONI\_CONSUL\_URL**: url for the Consul, in format: http(s)://host:port - **PATRONI\_CONSUL\_PORT**: (optional) Consul port - **PATRONI\_CONSUL\_SCHEME**: (optional) **http** or **https**, defaults to **http** - **PATRONI\_CONSUL\_TOKEN**: (optional) ACL token - **PATRONI\_CONSUL\_VERIFY**: (optional) whether to verify the SSL certificate for HTTPS requests - **PATRONI\_CONSUL\_CACERT**: (optional) The ca certificate. If present it will enable validation. - **PATRONI\_CONSUL\_CERT**: (optional) File with the client certificate - **PATRONI\_CONSUL\_KEY**: (optional) File with the client key. Can be empty if the key is part of certificate. - **PATRONI\_CONSUL\_DC**: (optional) Datacenter to communicate with. By default the datacenter of the host is used. - **PATRONI\_CONSUL\_CONSISTENCY**: (optional) Select consul consistency mode. Possible values are ``default``, ``consistent``, or ``stale`` (more details in `consul API reference `__) - **PATRONI\_CONSUL\_CHECKS**: (optional) list of Consul health checks used for the session. By default an empty list is used. - **PATRONI\_CONSUL\_REGISTER\_SERVICE**: (optional) whether or not to register a service with the name defined by the scope parameter and the tag master, replica or standby-leader depending on the node's role. Defaults to **false** - **PATRONI\_CONSUL\_SERVICE\_CHECK\_INTERVAL**: (optional) how often to perform health check against registered url Etcd ---- - **PATRONI\_ETCD\_PROXY**: proxy url for the etcd. If you are connecting to the etcd using proxy, use this parameter instead of **PATRONI\_ETCD\_URL** - **PATRONI\_ETCD\_URL**: url for the etcd, in format: http(s)://(username:password@)host:port - **PATRONI\_ETCD\_HOSTS**: list of etcd endpoints in format 'host1:port1','host2:port2',etc... - **PATRONI\_ETCD\_USE\_PROXIES**: If this parameter is set to true, Patroni will consider **hosts** as a list of proxies and will not perform a topology discovery of etcd cluster but stick to a fixed list of **hosts**. - **PATRONI\_ETCD\_PROTOCOL**: http or https, if not specified http is used. If the **url** or **proxy** is specified - will take protocol from them. - **PATRONI\_ETCD\_HOST**: the host:port for the etcd endpoint. - **PATRONI\_ETCD\_SRV**: Domain to search the SRV record(s) for cluster autodiscovery. - **PATRONI\_ETCD\_USERNAME**: username for etcd authentication. - **PATRONI\_ETCD\_PASSWORD**: password for etcd authentication. - **PATRONI\_ETCD\_CACERT**: The ca certificate. If present it will enable validation. - **PATRONI\_ETCD\_CERT**: File with the client certificate. 
- **PATRONI\_ETCD\_KEY**: File with the client key. Can be empty if the key is part of certificate. ZooKeeper --------- - **PATRONI\_ZOOKEEPER\_HOSTS**: comma separated list of ZooKeeper cluster members: "'host1:port1','host2:port2','etc...'". It is important to quote every single entity! Exhibitor --------- - **PATRONI\_EXHIBITOR\_HOSTS**: initial list of Exhibitor (ZooKeeper) nodes in format: 'host1,host2,etc...'. This list updates automatically whenever the Exhibitor (ZooKeeper) cluster topology changes. - **PATRONI\_EXHIBITOR\_PORT**: Exhibitor port. .. _kubernetes_environment: Kubernetes ---------- - **PATRONI\_KUBERNETES\_NAMESPACE**: (optional) Kubernetes namespace where the Patroni pod is running. Default value is `default`. - **PATRONI\_KUBERNETES\_LABELS**: Labels in format ``{label1: value1, label2: value2}``. These labels will be used to find existing objects (Pods and either Endpoints or ConfigMaps) associated with the current cluster. Also Patroni will set them on every object (Endpoint or ConfigMap) it creates. - **PATRONI\_KUBERNETES\_SCOPE\_LABEL**: (optional) name of the label containing cluster name. Default value is `cluster-name`. - **PATRONI\_KUBERNETES\_ROLE\_LABEL**: (optional) name of the label containing Postgres role (`master` or `replica`). Patroni will set this label on the pod it is running in. Default value is `role`. - **PATRONI\_KUBERNETES\_USE\_ENDPOINTS**: (optional) if set to true, Patroni will use Endpoints instead of ConfigMaps to run leader elections and keep cluster state. - **PATRONI\_KUBERNETES\_POD\_IP**: (optional) IP address of the pod Patroni is running in. This value is required when `PATRONI_KUBERNETES_USE_ENDPOINTS` is enabled and is used to populate the leader endpoint subsets when the pod's PostgreSQL is promoted. - **PATRONI\_KUBERNETES\_PORTS**: (optional) if the Service object has the name for the port, the same name must appear in the Endpoint object, otherwise service won't work. For example, if your service is defined as ``{Kind: Service, spec: {ports: [{name: postgresql, port: 5432, targetPort: 5432}]}}``, then you have to set ``PATRONI_KUBERNETES_PORTS='[{"name": "postgresql", "port": 5432}]'`` and Patroni will use it for updating subsets of the leader Endpoint. This parameter is used only if `PATRONI_KUBERNETES_USE_ENDPOINTS` is set. PostgreSQL ---------- - **PATRONI\_POSTGRESQL\_LISTEN**: IP address + port that Postgres listens to. Multiple comma-separated addresses are permitted, as long as the port component is appended after to the last one with a colon, i.e. ``listen: 127.0.0.1,127.0.0.2:5432``. Patroni will use the first address from this list to establish local connections to the PostgreSQL node. - **PATRONI\_POSTGRESQL\_CONNECT\_ADDRESS**: IP address + port through which Postgres is accessible from other nodes and applications. - **PATRONI\_POSTGRESQL\_DATA\_DIR**: The location of the Postgres data directory, either existing or to be initialized by Patroni. - **PATRONI\_POSTGRESQL\_CONFIG\_DIR**: The location of the Postgres configuration directory, defaults to the data directory. Must be writable by Patroni. - **PATRONI\_POSTGRESQL\_BIN_DIR**: Path to PostgreSQL binaries. (pg_ctl, pg_rewind, pg_basebackup, postgres) The default value is an empty string meaning that PATH environment variable will be used to find the executables. - **PATRONI\_POSTGRESQL\_PGPASS**: path to the `.pgpass `__ password file. Patroni creates this file before executing pg\_basebackup and under some other circumstances. 
The location must be writable by Patroni.
- **PATRONI\_REPLICATION\_USERNAME**: replication username; the user will be created during initialization. Replicas will use this user to access the master via streaming replication.
- **PATRONI\_REPLICATION\_PASSWORD**: replication password; the user will be created during initialization.
- **PATRONI\_REPLICATION\_SSLMODE**: (optional) maps to the `sslmode `__ connection parameter, which allows a client to specify the type of TLS negotiation mode with the server. For more information on how each mode works, please visit the `PostgreSQL documentation `__. The default mode is ``prefer``.
- **PATRONI\_REPLICATION\_SSLKEY**: (optional) maps to the `sslkey `__ connection parameter, which specifies the location of the secret key used with the client's certificate.
- **PATRONI\_REPLICATION\_SSLCERT**: (optional) maps to the `sslcert `__ connection parameter, which specifies the location of the client certificate.
- **PATRONI\_REPLICATION\_SSLROOTCERT**: (optional) maps to the `sslrootcert `__ connection parameter, which specifies the location of a file containing one or more certificate authorities (CA) certificates that the client will use to verify a server's certificate.
- **PATRONI\_REPLICATION\_SSLCRL**: (optional) maps to the `sslcrl `__ connection parameter, which specifies the location of a file containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list.
- **PATRONI\_SUPERUSER\_USERNAME**: name for the superuser, set during initialization (initdb) and later used by Patroni to connect to Postgres. Also this user is used by pg_rewind.
- **PATRONI\_SUPERUSER\_PASSWORD**: password for the superuser, set during initialization (initdb).
- **PATRONI\_SUPERUSER\_SSLMODE**: (optional) maps to the `sslmode `__ connection parameter, which allows a client to specify the type of TLS negotiation mode with the server. For more information on how each mode works, please visit the `PostgreSQL documentation `__. The default mode is ``prefer``.
- **PATRONI\_SUPERUSER\_SSLKEY**: (optional) maps to the `sslkey `__ connection parameter, which specifies the location of the secret key used with the client's certificate.
- **PATRONI\_SUPERUSER\_SSLCERT**: (optional) maps to the `sslcert `__ connection parameter, which specifies the location of the client certificate.
- **PATRONI\_SUPERUSER\_SSLROOTCERT**: (optional) maps to the `sslrootcert `__ connection parameter, which specifies the location of a file containing one or more certificate authorities (CA) certificates that the client will use to verify a server's certificate.
- **PATRONI\_SUPERUSER\_SSLCRL**: (optional) maps to the `sslcrl `__ connection parameter, which specifies the location of a file containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list.
- **PATRONI\_REWIND\_USERNAME**: name for the user for ``pg_rewind``; the user will be created during initialization of postgres 11+ and all necessary `permissions `__ will be granted.
- **PATRONI\_REWIND\_PASSWORD**: password for the user for ``pg_rewind``; the user will be created during initialization.
- **PATRONI\_REWIND\_SSLMODE**: (optional) maps to the `sslmode `__ connection parameter, which allows a client to specify the type of TLS negotiation mode with the server. For more information on how each mode works, please visit the `PostgreSQL documentation `__. The default mode is ``prefer``.
- **PATRONI\_REWIND\_SSLKEY**: (optional) maps to the `sslkey `__ connection parameter, which specifies the location of the secret key used with the client's certificate.
- **PATRONI\_REWIND\_SSLCERT**: (optional) maps to the `sslcert `__ connection parameter, which specifies the location of the client certificate.
- **PATRONI\_REWIND\_SSLROOTCERT**: (optional) maps to the `sslrootcert `__ connection parameter, which specifies the location of a file containing one or more certificate authorities (CA) certificates that the client will use to verify a server's certificate.
- **PATRONI\_REWIND\_SSLCRL**: (optional) maps to the `sslcrl `__ connection parameter, which specifies the location of a file containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list.

REST API
--------

- **PATRONI\_RESTAPI\_CONNECT\_ADDRESS**: IP address and port to access the REST API.
- **PATRONI\_RESTAPI\_LISTEN**: IP address and port that Patroni will listen to, to provide health-check information for HAProxy.
- **PATRONI\_RESTAPI\_USERNAME**: Basic-auth username to protect unsafe REST API endpoints.
- **PATRONI\_RESTAPI\_PASSWORD**: Basic-auth password to protect unsafe REST API endpoints.
- **PATRONI\_RESTAPI\_CERTFILE**: Specifies the file with the certificate in the PEM format. If the certfile is not specified or is left empty, the API server will work without SSL.
- **PATRONI\_RESTAPI\_KEYFILE**: Specifies the file with the secret key in the PEM format.
- **PATRONI\_RESTAPI\_CAFILE**: Specifies the file with the CA_BUNDLE with certificates of trusted CAs to use while verifying client certs.
- **PATRONI\_RESTAPI\_VERIFY\_CLIENT**: ``none``, ``optional`` or ``required``. When ``none``, the REST API will not check client certificates. When ``required``, client certificates are required for all REST API calls. When ``optional``, client certificates are required for all unsafe REST API endpoints. If ``verify_client`` is set to ``optional`` or ``required``, basic-auth is not checked.

CTL
---

- **PATRONI\_CTL\_INSECURE**: Allow connections to REST API without verifying SSL certs.
- **PATRONI\_CTL\_CACERT**: Specifies the file with the CA_BUNDLE file or directory with certificates of trusted CAs to use while verifying REST API SSL certs. If not provided patronictl will use the value provided for REST API "cafile" parameter.
- **PATRONI\_CTL\_CERTFILE**: Specifies the file with the client certificate in the PEM format. If not provided patronictl will use the value provided for REST API "certfile" parameter.
- **PATRONI\_CTL\_KEYFILE**: Specifies the file with the client secret key in the PEM format. If not provided patronictl will use the value provided for REST API "keyfile" parameter.

patroni-1.6.4/docs/Makefile

# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
SPHINXPROJ    = Patroni
SOURCEDIR     = .
BUILDDIR      = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) patroni-1.6.4/docs/README.rst000066400000000000000000000134321361356115100156220ustar00rootroot00000000000000.. _readme: ============ Introduction ============ Patroni originated as a fork of `Governor `__, the project from Compose. It includes plenty of new features. For an example of a Docker-based deployment with Patroni, see `Spilo `__, currently in use at Zalando. For additional background info, see: * `PostgreSQL HA with Kubernetes and Patroni `__, talk by Josh Berkus at KubeCon 2016 (video) * `Feb. 2016 Zalando Tech blog post `__ Development Status ------------------ Patroni is in active development and accepts contributions. See our :ref:`Contributing ` section below for more details. We report new releases information :ref:`here `. Technical Requirements/Installation ----------------------------------- **Pre-requirements for Mac OS** To install requirements on a Mac, run the following: :: brew install postgresql etcd haproxy libyaml python .. _psycopg2_install_options: **Psycopg2** Starting from `psycopg2-2.8 `__ the binary version of psycopg2 will no longer be installed by default. Installing it from the source code requires C compiler and postgres+python dev packages. Since in the python world it is not possible to specify dependency as ``psycopg2 OR psycopg2-binary`` you will have to decide how to install it. There are a few options available: 1. Use the package manager from your distro :: sudo apt-get install python-psycopg2 # install python2 psycopg2 module on Debian/Ubuntu sudo apt-get install python3-psycopg2 # install python3 psycopg2 module on Debian/Ubuntu sudo yum install python-psycopg2 # install python2 psycopg2 on RedHat/Fedora/CentOS 2. Install psycopg2 from the binary package :: pip install psycopg2-binary 3. Install psycopg2 from source :: pip install psycopg2>=2.5.4 **General installation for pip** Patroni can be installed with pip: :: pip install patroni[dependencies] where dependencies can be either empty, or consist of one or more of the following: etcd `python-etcd` module in order to use Etcd as DCS consul `python-consul` module in order to use Consul as DCS zookeeper `kazoo` module in order to use Zookeeper as DCS exhibitor `kazoo` module in order to use Exhibitor as DCS (same dependencies as for Zookeeper) kubernetes `kubernetes` module in order to use Kubernetes as DCS in Patroni aws `boto` in order to use AWS callbacks For example, the command in order to install Patroni together with dependencies for Etcd as a DCS and AWS callbacks is: :: pip install patroni[etcd,aws] Note that external tools to call in the replica creation or custom bootstrap scripts (i.e. WAL-E) should be installed independently of Patroni. .. _running_configuring: Running and Configuring ----------------------- The following section assumes Patroni repository as being cloned from https://github.com/zalando/patroni. Namely, you will need example configuration files `postgres0.yml` and `postgres1.yml`. If you installed Patroni with pip, you can obtain those files from the git repository and replace `./patroni.py` below with `patroni` command. To get started, do the following from different terminals: :: > etcd --data-dir=data/etcd > ./patroni.py postgres0.yml > ./patroni.py postgres1.yml You will then see a high-availability cluster start up. Test different settings in the YAML files to see how the cluster's behavior changes. Kill some of the components to see how the system behaves. 
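While you do that, each node's view of the cluster can be watched through Patroni's REST API. A small sketch (ports 8008 and 8009 are assumptions matching the defaults in the example YAML files):

::

    # Sketch: poll each node's Patroni REST API and print its role and state
    # while components are being stopped and restarted.
    import json
    import urllib.request

    for port in (8008, 8009):
        try:
            with urllib.request.urlopen('http://127.0.0.1:%d/patroni' % port) as r:
                status = json.load(r)
            print(port, status.get('role'), status.get('state'))
        except OSError as exc:
            print(port, 'unreachable:', exc)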
Add more ``postgres*.yml`` files to create an even larger cluster.

Patroni provides an `HAProxy `__ configuration, which will give your application a single endpoint for connecting to the cluster's leader. To configure, run:

::

    > haproxy -f haproxy.cfg

::

    > psql --host 127.0.0.1 --port 5000 postgres

YAML Configuration
------------------

Go :ref:`here ` for comprehensive information about settings for etcd, consul, and ZooKeeper. And for an example, see `postgres0.yml `__.

Environment Configuration
-------------------------

Go :ref:`here ` for comprehensive information about configuring (overriding) settings via environment variables.

Replication Choices
-------------------

Patroni uses Postgres' streaming replication, which is asynchronous by default. Patroni's asynchronous replication configuration allows for ``maximum_lag_on_failover`` settings. This setting ensures failover will not occur if a follower is more than a certain number of bytes behind the leader. This setting should be increased or decreased based on business requirements. It's also possible to use synchronous replication for better durability guarantees. See :ref:`replication modes documentation ` for details.

Applications Should Not Use Superusers
--------------------------------------

When connecting from an application, always use a non-superuser. Patroni requires access to the database to function properly. By using a superuser from an application, you can potentially use the entire connection pool, including the connections reserved for superusers, with the ``superuser_reserved_connections`` setting. If Patroni cannot access the Primary because the connection pool is full, behavior will be undesirable.

.. |Build Status| image:: https://travis-ci.org/zalando/patroni.svg?branch=master
   :target: https://travis-ci.org/zalando/patroni
.. |Coverage Status| image:: https://coveralls.io/repos/zalando/patroni/badge.svg?branch=master
   :target: https://coveralls.io/r/zalando/patroni?branch=master

patroni-1.6.4/docs/SETTINGS.rst

.. _settings:

===========================
YAML Configuration Settings
===========================

.. _dynamic_configuration_settings:

Dynamic configuration settings
------------------------------

Dynamic configuration is stored in the DCS (Distributed Configuration Store) and applied on all cluster nodes. Some parameters, like **loop_wait**, **ttl**, **postgresql.parameters.max_connections**, **postgresql.parameters.max_worker_processes** and so on could be set only in the dynamic configuration. Some other parameters like **postgresql.listen**, **postgresql.data_dir** could be set only locally, i.e. in the Patroni config file or via :ref:`configuration ` variable. In most cases the local configuration will override the dynamic configuration. In order to change the dynamic configuration you can use either the ``patronictl edit-config`` tool or the Patroni :ref:`REST API `.

- **loop\_wait**: the number of seconds the loop will sleep. Default value: 10
- **ttl**: the TTL to acquire the leader lock (in seconds). Think of it as the length of time before initiation of the automatic failover process. Default value: 30
- **retry\_timeout**: timeout for DCS and PostgreSQL operation retries (in seconds). DCS or network issues shorter than this will not cause Patroni to demote the leader. Default value: 10
- **maximum\_lag\_on\_failover**: the maximum bytes a follower may lag to be able to participate in leader election (see the sketch below).
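  A simplified illustration of this rule (a sketch, not Patroni's actual code; the 1 MiB threshold is only an example value)::

      # Sketch: a follower may participate in the leader election only while
      # its WAL position is close enough to the last known leader position.
      def may_participate(leader_wal_position, my_wal_position,
                          maximum_lag_on_failover=1048576):  # example: 1 MiB
          lag_in_bytes = leader_wal_position - my_wal_position
          return lag_in_bytes <= maximum_lag_on_failover

      print(may_participate(16777216, 16776000))  # True: only 1216 bytes behind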
- **master\_start\_timeout**: the amount of time a master is allowed to recover from failures before failover is triggered (in seconds). Default is 300 seconds. When set to 0 failover is done immediately after a crash is detected if possible. When using asynchronous replication a failover can cause lost transactions. Worst case failover time for master failure is: loop\_wait + master\_start\_timeout + loop\_wait, unless master\_start\_timeout is zero, in which case it's just loop\_wait. Set the value according to your durability/availability tradeoff. - **synchronous\_mode**: turns on synchronous replication mode. In this mode a replica will be chosen as synchronous and only the latest leader and synchronous replica are able to participate in leader election. Synchronous mode makes sure that successfully committed transactions will not be lost at failover, at the cost of losing availability for writes when Patroni cannot ensure transaction durability. See :ref:`replication modes documentation ` for details. - **synchronous\_mode\_strict**: prevents disabling synchronous replication if no synchronous replicas are available, blocking all client writes to the master. See :ref:`replication modes documentation ` for details. - **postgresql**: - **use\_pg\_rewind**: whether or not to use pg_rewind. Defaults to `false`. - **use\_slots**: whether or not to use replication_slots. Defaults to `true` on PostgreSQL 9.4+. - **recovery\_conf**: additional configuration settings written to recovery.conf when configuring follower. There is no recovery.conf anymore in PostgreSQL 12, but you may continue using this section, because Patroni handles it transparently. - **parameters**: list of configuration settings for Postgres. - **standby\_cluster**: if this section is defined, we want to bootstrap a standby cluster. - **host**: an address of remote master - **port**: a port of remote master - **primary\_slot\_name**: which slot on the remote master to use for replication. This parameter is optional, the default value is derived from the instance name (see function `slot_name_from_member_name`). - **create\_replica\_methods**: an ordered list of methods that can be used to bootstrap standby leader from the remote master, can be different from the list defined in :ref:`postgresql_settings` - **restore\_command**: command to restore WAL records from the remote master to standby leader, can be different from the list defined in :ref:`postgresql_settings` - **archive\_cleanup\_command**: cleanup command for standby leader - **recovery\_min\_apply\_delay**: how long to wait before actually apply WAL records on a standby leader - **slots**: define permanent replication slots. These slots will be preserved during switchover/failover. Patroni will try to create slots before opening connections to the cluster. - **my_slot_name**: the name of replication slot. It is the responsibility of the operator to make sure that there are no clashes in names between replication slots automatically created by Patroni for members and permanent replication slots. - **type**: slot type. Could be ``physical`` or ``logical``. If the slot is logical, you have to additionally define ``database`` and ``plugin``. - **database**: the database name where logical slots should be created. - **plugin**: the plugin name for the logical slot. Global/Universal ---------------- - **name**: the name of the host. Must be unique for the cluster. 
Global/Universal
----------------

- **name**: the name of the host. Must be unique for the cluster.

- **namespace**: path within the configuration store where Patroni will keep information about the cluster. Default value: "/service"

- **scope**: cluster name

Log
---

- **level**: sets the general logging level. Default value is **INFO** (see `the docs for Python logging `_)

- **traceback\_level**: sets the level where tracebacks will be visible. Default value is **ERROR**. Set it to **DEBUG** if you want to see tracebacks only if you enable **log.level=DEBUG**.

- **format**: sets the log formatting string. Default value is **%(asctime)s %(levelname)s: %(message)s** (see `the LogRecord attributes `_)

- **dateformat**: sets the datetime formatting string. (see the `formatTime() documentation `_)

- **max\_queue\_size**: Patroni is using two-step logging. Log records are written into an in-memory queue and there is a separate thread which pulls them from the queue and writes them to stderr or a file. The maximum size of the internal queue is limited by default to **1000** records, which is enough to keep logs for the past 1h20m.

- **dir**: Directory to write application logs to. The directory must exist and be writable by the user executing Patroni. If you set this value, the application will retain 4 logs of 25MB each by default. You can tune those retention values with `file_num` and `file_size` (see below).

- **file\_num**: The number of application logs to retain.

- **file\_size**: Size of the patroni.log file (in bytes) that triggers a log rolling.

- **loggers**: This section allows redefining the logging level per python module

  - **patroni.postmaster: WARNING**
  - **urllib3: DEBUG**

.. _bootstrap_settings:

Bootstrap configuration
-----------------------

- **dcs**: This section will be written into `///config` of the given configuration store after initializing the new cluster. It is the global dynamic configuration for the cluster. Under ``bootstrap.dcs`` you can put any of the parameters described in the :ref:`Dynamic Configuration settings ` and after Patroni has initialized (bootstrapped) the new cluster, it will write this section into `///config` of the configuration store. All later changes of ``bootstrap.dcs`` will not take any effect! If you want to change them please use either ``patronictl edit-config`` or the Patroni :ref:`REST API `.

- **method**: custom script to use for bootstrapping this cluster. See :ref:`custom bootstrap methods documentation ` for details. When ``initdb`` is specified, Patroni reverts to the default ``initdb`` command. ``initdb`` is also triggered when no ``method`` parameter is present in the configuration file.

- **initdb**: List of options to be passed on to initdb.

  - **- data-checksums**: Must be enabled when pg_rewind is needed on 9.3.
  - **- encoding: UTF8**: default encoding for new databases.
  - **- locale: UTF8**: default locale for new databases.

- **pg\_hba**: list of lines that you should add to pg\_hba.conf.

  - **- host all all 0.0.0.0/0 md5**.
  - **- host replication replicator 127.0.0.1/32 md5**: A line like this is required for replication.

- **users**: Some additional users which need to be created after initializing the new cluster

  - **admin**: the name of the user

    - **password: zalando**:
    - **options**: list of options for the CREATE USER statement

      - **- createrole**
      - **- createdb**

- **post\_bootstrap** or **post\_init**: An additional script that will be executed after initializing the cluster. The script receives a connection string URL (with the cluster superuser as a user name). The PGPASSFILE variable is set to the location of the pgpass file.
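Putting the pieces above together, a ``bootstrap`` section could be sketched roughly as follows. The user name, password and the script path are placeholders taken from the examples above, not defaults:

.. code-block:: yaml

    bootstrap:
      dcs:
        ttl: 30
        loop_wait: 10
      initdb:
        - data-checksums
        - encoding: UTF8
      pg_hba:
        - host all all 0.0.0.0/0 md5
        - host replication replicator 127.0.0.1/32 md5
      users:
        admin:
          password: zalando
          options:
            - createrole
            - createdb
      post_bootstrap: /usr/local/bin/post_bootstrap.sh  # placeholder path

Remember that everything under ``bootstrap.dcs`` is written to the configuration store only once, at cluster initialization; later changes must go through ``patronictl edit-config`` or the REST API.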
.. _consul_settings:

Consul
------

Most of the parameters are optional, but you have to specify one of **host** or **url**

- **host**: the host:port for the Consul endpoint, in format: http(s)://host:port
- **url**: url for the Consul endpoint
- **port**: (optional) Consul port
- **scheme**: (optional) **http** or **https**, defaults to **http**
- **token**: (optional) ACL token
- **verify**: (optional) whether to verify the SSL certificate for HTTPS requests
- **cacert**: (optional) the CA certificate. If present it will enable validation.
- **cert**: (optional) file with the client certificate
- **key**: (optional) file with the client key. Can be empty if the key is part of **cert**.
- **dc**: (optional) datacenter to communicate with. By default the datacenter of the host is used.
- **consistency**: (optional) select the Consul consistency mode. Possible values are ``default``, ``consistent``, or ``stale`` (more details in the `consul API reference `__)
- **checks**: (optional) list of Consul health checks used for the session. By default an empty list is used.
- **register\_service**: (optional) whether or not to register a service with the name defined by the scope parameter and the tag master, replica or standby-leader depending on the node's role. Defaults to **false**
- **service\_check\_interval**: (optional) how often to perform a health check against the registered url

Etcd
----

Most of the parameters are optional, but you have to specify one of **host**, **hosts**, **url**, **proxy** or **srv**

- **host**: the host:port for the etcd endpoint.
- **hosts**: list of etcd endpoints in the format host1:port1,host2:port2,etc... Could be a comma separated string or an actual yaml list.
- **use\_proxies**: If this parameter is set to true, Patroni will consider **hosts** as a list of proxies and will not perform a topology discovery of the etcd cluster.
- **url**: url for etcd
- **proxy**: proxy url for etcd. If you are connecting to etcd using a proxy, use this parameter instead of **url**
- **srv**: domain to search the SRV record(s) for cluster autodiscovery.
- **protocol**: (optional) http or https; if not specified, http is used. If **url** or **proxy** is specified, the protocol is taken from them.
- **username**: (optional) username for etcd authentication.
- **password**: (optional) password for etcd authentication.
- **cacert**: (optional) the CA certificate. If present it will enable validation.
- **cert**: (optional) file with the client certificate.
- **key**: (optional) file with the client key. Can be empty if the key is part of **cert**.

ZooKeeper
----------

- **hosts**: list of ZooKeeper cluster members in format: ['host1:port1', 'host2:port2', 'etc...'].

Exhibitor
---------

- **hosts**: initial list of Exhibitor (ZooKeeper) nodes in format: 'host1,host2,etc...'. This list updates automatically whenever the Exhibitor (ZooKeeper) cluster topology changes.
- **poll\_interval**: how often the list of ZooKeeper and Exhibitor nodes should be updated from Exhibitor
- **port**: Exhibitor port.
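As a quick illustration of the Consul and Etcd sections above, pointing Patroni at a DCS usually takes only a couple of lines in the local configuration. The addresses below are placeholders:

.. code-block:: yaml

    # etcd, with a static list of endpoints (placeholder addresses)
    etcd:
      hosts: 127.0.0.1:2379,127.0.0.1:4001

    # alternatively, Consul:
    # consul:
    #   host: 127.0.0.1:8500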
.. _kubernetes_settings:

Kubernetes
----------

- **namespace**: (optional) Kubernetes namespace where the Patroni pod is running. Default value is `default`.
- **labels**: Labels in format ``{label1: value1, label2: value2}``. These labels will be used to find existing objects (Pods and either Endpoints or ConfigMaps) associated with the current cluster. Also Patroni will set them on every object (Endpoint or ConfigMap) it creates.
- **scope\_label**: (optional) name of the label containing the cluster name. Default value is `cluster-name`.
- **role\_label**: (optional) name of the label containing the role (master or replica). Patroni will set this label on the pod it runs in. Default value is ``role``.
- **use\_endpoints**: (optional) if set to true, Patroni will use Endpoints instead of ConfigMaps to run leader elections and keep cluster state.
- **pod\_ip**: (optional) IP address of the pod Patroni is running in. This value is required when `use_endpoints` is enabled and is used to populate the leader endpoint subsets when the pod's PostgreSQL is promoted.
- **ports**: (optional) if the Service object has a name for the port, the same name must appear in the Endpoint object, otherwise the service won't work. For example, if your service is defined as ``{Kind: Service, spec: {ports: [{name: postgresql, port: 5432, targetPort: 5432}]}}``, then you have to set ``kubernetes.ports: [{"name": "postgresql", "port": 5432}]`` and Patroni will use it for updating subsets of the leader Endpoint. This parameter is used only if `kubernetes.use_endpoints` is set.
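A sketch of the corresponding section, assuming a service named ``postgresql`` on port 5432 as in the example above; the label key/value shown is purely illustrative:

.. code-block:: yaml

    kubernetes:
      namespace: default
      labels:
        application: patroni   # illustrative label
      use_endpoints: true
      ports:
        - name: postgresql
          port: 5432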
.. _postgresql_settings:

PostgreSQL
----------

- **authentication**:

  - **superuser**:

    - **username**: name for the superuser, set during initialization (initdb) and later used by Patroni to connect to Postgres.
    - **password**: password for the superuser, set during initialization (initdb).
    - **sslmode**: (optional) maps to the `sslmode `__ connection parameter, which allows a client to specify the type of TLS negotiation mode with the server. For more information on how each mode works, please visit the `PostgreSQL documentation `__. The default mode is ``prefer``.
    - **sslkey**: (optional) maps to the `sslkey `__ connection parameter, which specifies the location of the secret key used with the client's certificate.
    - **sslcert**: (optional) maps to the `sslcert `__ connection parameter, which specifies the location of the client certificate.
    - **sslrootcert**: (optional) maps to the `sslrootcert `__ connection parameter, which specifies the location of a file containing one or more certificate authority (CA) certificates that the client will use to verify a server's certificate.
    - **sslcrl**: (optional) maps to the `sslcrl `__ connection parameter, which specifies the location of a file containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list.

  - **replication**:

    - **username**: replication username; the user will be created during initialization. Replicas will use this user to access the master via streaming replication
    - **password**: replication password; the user will be created during initialization.
    - **sslmode**: (optional) maps to the `sslmode `__ connection parameter, which allows a client to specify the type of TLS negotiation mode with the server. For more information on how each mode works, please visit the `PostgreSQL documentation `__. The default mode is ``prefer``.
    - **sslkey**: (optional) maps to the `sslkey `__ connection parameter, which specifies the location of the secret key used with the client's certificate.
    - **sslcert**: (optional) maps to the `sslcert `__ connection parameter, which specifies the location of the client certificate.
    - **sslrootcert**: (optional) maps to the `sslrootcert `__ connection parameter, which specifies the location of a file containing one or more certificate authority (CA) certificates that the client will use to verify a server's certificate.
    - **sslcrl**: (optional) maps to the `sslcrl `__ connection parameter, which specifies the location of a file containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list.

  - **rewind**:

    - **username**: name for the user for ``pg_rewind``; the user will be created during initialization of PostgreSQL 11+ and all necessary `permissions `__ will be granted.
    - **password**: password for the user for ``pg_rewind``; the user will be created during initialization.
    - **sslmode**: (optional) maps to the `sslmode `__ connection parameter, which allows a client to specify the type of TLS negotiation mode with the server. For more information on how each mode works, please visit the `PostgreSQL documentation `__. The default mode is ``prefer``.
    - **sslkey**: (optional) maps to the `sslkey `__ connection parameter, which specifies the location of the secret key used with the client's certificate.
    - **sslcert**: (optional) maps to the `sslcert `__ connection parameter, which specifies the location of the client certificate.
    - **sslrootcert**: (optional) maps to the `sslrootcert `__ connection parameter, which specifies the location of a file containing one or more certificate authority (CA) certificates that the client will use to verify a server's certificate.
    - **sslcrl**: (optional) maps to the `sslcrl `__ connection parameter, which specifies the location of a file containing a certificate revocation list. A client will reject connecting to any server that has a certificate present in this list.

- **callbacks**: callback scripts to run on certain actions. Patroni will pass the action, role and cluster name. (See scripts/aws.py as an example of how to write them.)

  - **on\_reload**: run this script when a configuration reload is triggered.
  - **on\_restart**: run this script when postgres restarts (without changing role).
  - **on\_role\_change**: run this script when postgres is being promoted or demoted.
  - **on\_start**: run this script when postgres starts.
  - **on\_stop**: run this script when postgres stops.

- **connect\_address**: IP address + port through which Postgres is accessible from other nodes and applications.

- **create\_replica\_methods**: an ordered list of the create methods for turning a Patroni node into a new replica. "basebackup" is the default method; other methods are assumed to refer to scripts, each of which is configured as its own config item. See :ref:`custom replica creation methods documentation ` for further explanation.

- **data\_dir**: The location of the Postgres data directory, either :ref:`existing ` or to be initialized by Patroni.

- **config\_dir**: The location of the Postgres configuration directory, defaults to the data directory. Must be writable by Patroni.

- **bin\_dir**: Path to PostgreSQL binaries (pg_ctl, pg_rewind, pg_basebackup, postgres). The default value is an empty string, meaning that the PATH environment variable will be used to find the executables.

- **listen**: IP address + port that Postgres listens to; must be accessible from other nodes in the cluster, if you're using streaming replication. Multiple comma-separated addresses are permitted, as long as the port component is appended to the last one with a colon, i.e. ``listen: 127.0.0.1,127.0.0.2:5432``. Patroni will use the first address from this list to establish local connections to the PostgreSQL node.
- **use\_unix\_socket**: specifies that Patroni should prefer to use unix sockets to connect to the cluster. Default value is ``false``. If ``unix_socket_directories`` is defined, Patroni will use the first suitable value from it to connect to the cluster and fall back to tcp if nothing is suitable. If ``unix_socket_directories`` is not specified in ``postgresql.parameters``, Patroni will assume that the default value should be used and omit ``host`` from the connection parameters.

- **pgpass**: path to the `.pgpass `__ password file. Patroni creates this file before executing pg\_basebackup, the post_init script and under some other circumstances. The location must be writable by Patroni.

- **recovery\_conf**: additional configuration settings written to recovery.conf when configuring a follower.

- **custom\_conf**: path to an optional custom ``postgresql.conf`` file that will be used in place of ``postgresql.base.conf``. The file must exist on all cluster nodes and be readable by PostgreSQL, and it will be included from its location in the real ``postgresql.conf``. Note that Patroni will not monitor this file for changes, nor back it up. However, its settings can still be overridden by Patroni's own configuration facilities - see :ref:`dynamic configuration ` for details.

- **parameters**: list of configuration settings for Postgres. Many of these are required for replication to work.

- **pg\_hba**: list of lines that Patroni will use to generate ``pg_hba.conf``. This parameter has higher priority than ``bootstrap.pg_hba``. Together with :ref:`dynamic configuration ` it simplifies management of ``pg_hba.conf``.

  - **- host all all 0.0.0.0/0 md5**.
  - **- host replication replicator 127.0.0.1/32 md5**: A line like this is required for replication.

- **pg\_ident**: list of lines that Patroni will use to generate ``pg_ident.conf``. Together with :ref:`dynamic configuration ` it simplifies management of ``pg_ident.conf``.

  - **- mapname1 systemname1 pguser1**.
  - **- mapname1 systemname2 pguser2**.

- **pg\_ctl\_timeout**: How long pg_ctl should wait when doing ``start``, ``stop`` or ``restart``. Default value is 60 seconds.

- **use\_pg\_rewind**: try to use pg\_rewind on the former leader when it joins the cluster as a replica.

- **remove\_data\_directory\_on\_rewind\_failure**: If this option is enabled, Patroni will remove the PostgreSQL data directory and recreate the replica. Otherwise it will try to follow the new leader. Default value is **false**.

- **remove\_data\_directory\_on\_diverged\_timelines**: Patroni will remove the PostgreSQL data directory and recreate the replica if it notices that the timelines are diverging and the former master cannot start streaming from the new master. This option is useful when ``pg_rewind`` cannot be used. Default value is **false**.

- **replica\_method**: for each create_replica_methods other than basebackup, you would add a configuration section of the same name. At a minimum, this should include "command" with a full path to the actual script to be executed. Other configuration parameters will be passed along to the script in the form "parameter=value".
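Tying the most common of these options together, a minimal ``postgresql`` section might look like the sketch below. All addresses, paths and passwords are placeholders:

.. code-block:: yaml

    postgresql:
      listen: 127.0.0.1,10.0.0.11:5432
      connect_address: 10.0.0.11:5432       # placeholder address
      data_dir: /var/lib/postgresql/data    # placeholder path
      pgpass: /tmp/pgpass0                  # placeholder path
      authentication:
        superuser:
          username: postgres
          password: secret                  # placeholder
        replication:
          username: replicator
          password: secret                  # placeholder
      pg_hba:
        - host all all 0.0.0.0/0 md5
        - host replication replicator 127.0.0.1/32 md5
      use_pg_rewind: true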
REST API
--------

- **connect\_address**: IP address (or hostname) and port used to access Patroni's :ref:`REST API `. All the members of the cluster must be able to connect to this address, so unless the Patroni setup is intended for a demo inside localhost, this address must not be a "localhost" or loopback address (i.e. "localhost" or "127.0.0.1"). It can serve as an endpoint for HTTP health checks (read below about the "listen" REST API parameter), and also for user queries (either directly or via the REST API), as well as for the health checks done by the cluster members during leader elections (for example, to determine whether the master is still running, or if there is a node which has a WAL position that is ahead of the one doing the query, etc.). The connect_address is put in the member key in DCS, making it possible to translate the member name into the address to connect to its REST API.

- **listen**: IP address (or hostname) and port that Patroni will listen on for the REST API - both to provide the health checks and cluster messaging between the participating nodes, as described above, and to provide health-check information for HAProxy (or any other load balancer capable of doing HTTP "OPTIONS" or "GET" checks).

- **Optional**:

  - **authentication**:

    - **username**: Basic-auth username to protect unsafe REST API endpoints.
    - **password**: Basic-auth password to protect unsafe REST API endpoints.

  - **certfile**: Specifies the file with the certificate in the PEM format. If the certfile is not specified or is left empty, the API server will work without SSL.
  - **keyfile**: Specifies the file with the secret key in the PEM format.
  - **cafile**: Specifies the file with the CA_BUNDLE with certificates of trusted CAs to use while verifying client certs.
  - **verify\_client**: ``none``, ``optional`` or ``required``. When ``none``, the REST API will not check client certificates. When ``required``, client certificates are required for all REST API calls. When ``optional``, client certificates are required for all unsafe REST API endpoints. If ``verify_client`` is set to ``optional`` or ``required``, basic-auth is not checked.
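A short sketch of the section; the address and credentials are placeholders:

.. code-block:: yaml

    restapi:
      listen: 0.0.0.0:8008
      connect_address: 10.0.0.11:8008   # placeholder address
      authentication:
        username: admin                 # placeholder
        password: secret                # placeholder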
.. _patronictl_settings:

CTL
---

- **Optional**:

  - **insecure**: Allow connections to the REST API without verifying SSL certs.
  - **cacert**: Specifies the file with the CA_BUNDLE file or directory with certificates of trusted CAs to use while verifying REST API SSL certs. If not provided, patronictl will use the value provided for the REST API "cafile" parameter.
  - **certfile**: Specifies the file with the client certificate in the PEM format. If not provided, patronictl will use the value provided for the REST API "certfile" parameter.
  - **keyfile**: Specifies the file with the client secret key in the PEM format. If not provided, patronictl will use the value provided for the REST API "keyfile" parameter.

Watchdog
--------

- **mode**: ``off``, ``automatic`` or ``required``. When ``off`` the watchdog is disabled. When ``automatic`` the watchdog will be used if available, but ignored if it is not. When ``required`` the node will not become a leader unless the watchdog can be successfully enabled.
- **device**: Path to the watchdog device. Defaults to ``/dev/watchdog``.
- **safety_margin**: Number of seconds of safety margin between watchdog triggering and leader key expiration.

Tags
----

- **nofailover**: ``true`` or ``false``, controls whether this node is allowed to participate in the leader race and become a leader. Defaults to ``false``
- **clonefrom**: ``true`` or ``false``. If set to ``true``, other nodes might prefer to use this node for bootstrap (take ``pg_basebackup`` from it). If there are several nodes with the ``clonefrom`` tag set to ``true``, the node to bootstrap from will be chosen randomly. The default value is ``false``.
- **noloadbalance**: ``true`` or ``false``. If set to ``true``, the node will return HTTP Status Code 503 for the ``GET /replica`` REST API health-check and therefore will be excluded from load-balancing. Defaults to ``false``.
- **replicatefrom**: The IP address/hostname of another replica. Used to support cascading replication.
- **nosync**: ``true`` or ``false``. If set to ``true``, the node will never be selected as a synchronous replica.

patroni-1.6.4/docs/_static/

patroni-1.6.4/docs/_static/custom.css

li { margin-bottom: 0.5em }

patroni-1.6.4/docs/conf.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Patroni documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 19 16:54:09 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))

from patroni.version import __version__

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'Patroni'
copyright = '2015 Compose, Zalando SE'
author = 'Zalando SE'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__[:__version__.rfind('.')]
# The full version, including alpha/beta/rc tags.
release = __version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.
# See the documentation for a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'Patronidoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Patroni.tex', 'Patroni Documentation', 'Zalando SE', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'patroni', 'Patroni Documentation', [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Patroni', 'Patroni Documentation', author, 'Patroni', 'One line description of project.', 'Miscellaneous'),
]

# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}

# A possibility to have an own stylesheet, to add new rules or override existing ones
# For the latter case, the CSS specificity of the rules should be higher than the default ones
def setup(app):
    app.add_stylesheet("custom.css")

patroni-1.6.4/docs/dynamic_configuration.rst

.. _dynamic_configuration:

Patroni configuration
=====================

Patroni configuration is stored in the DCS (Distributed Configuration Store). There are 3 types of configuration:

- Dynamic configuration. These options can be set in DCS at any time.
  If the options changed are not part of the startup configuration, they are applied asynchronously (upon the next wake up cycle) to every node, which gets subsequently reloaded. If the node requires a restart to apply the configuration (for options with context postmaster, if their values have changed), a special ``pending_restart`` flag indicating this is set in the members.data JSON. Additionally, the node status also indicates this, by showing ``"restart_pending": true``.

- Local :ref:`configuration ` (patroni.yml). These options are defined in the configuration file and take precedence over dynamic configuration. patroni.yml can be changed and reloaded at runtime (without restarting Patroni) by sending SIGHUP to the Patroni process, performing a ``POST /reload`` REST-API request or executing ``patronictl reload``.

- Environment :ref:`configuration `. It is possible to set/override some of the "Local" configuration parameters with environment variables. Environment configuration is very useful when you are running in a dynamic environment and you don't know some of the parameters in advance (for example, it's not possible to know your external IP address when you are running inside ``docker``).

Some of the PostgreSQL parameters must hold the same values on the master and the replicas. For those, values set either in the local patroni configuration files or via the environment variables have no effect. To alter or set their values one must change the shared configuration in the DCS. Below is the actual list of such parameters together with the default values:

- max_connections: 100
- max_locks_per_transaction: 64
- max_worker_processes: 8
- max_prepared_transactions: 0
- wal_level: hot_standby
- wal_log_hints: on
- track_commit_timestamp: off

For the parameters below, PostgreSQL does not require equal values among the master and all the replicas. However, considering the possibility of a replica becoming the master at any time, it doesn't really make sense to set them differently; therefore, Patroni restricts setting their values to the dynamic configuration:

- max_wal_senders: 5
- max_replication_slots: 5
- wal_keep_segments: 8

These parameters are validated to ensure they are sane, or meet a minimum value.

There are some other Postgres parameters controlled by Patroni:

- listen_addresses - is set either from ``postgresql.listen`` or from the ``PATRONI_POSTGRESQL_LISTEN`` environment variable
- port - is set either from ``postgresql.listen`` or from the ``PATRONI_POSTGRESQL_LISTEN`` environment variable
- cluster_name - is set either from ``scope`` or from the ``PATRONI_SCOPE`` environment variable
- hot_standby: on

To be on the safe side, parameters from the above lists are not written into ``postgresql.conf``, but passed as a list of arguments to ``pg_ctl start``, which gives them the highest precedence, even above `ALTER SYSTEM `__.

When applying the local or dynamic configuration options, the following actions are taken:

- The node first checks if there is a postgresql.base.conf or if the ``custom_conf`` parameter is set.
- If the `custom_conf` parameter is set, it will take the file specified on it as a base configuration, ignoring `postgresql.base.conf` and `postgresql.conf`.
- If the `custom_conf` parameter is not set and `postgresql.base.conf` exists, it contains the renamed "original" configuration and it will be used as a base configuration.
- If there is no `custom_conf` nor `postgresql.base.conf`, the original postgresql.conf is taken and renamed to postgresql.base.conf.
- The dynamic options (with the exceptions above) are dumped into the postgresql.conf and an include is set in postgresql.conf to the used base configuration (either postgresql.base.conf or whatever is set in ``custom_conf``). Therefore, we would be able to apply new options without re-reading the configuration file to check if the include is present or not.
- Some parameters that are essential for Patroni to manage the cluster are overridden using the command line.
- If some of the options that require a restart are changed (we should look at the context in pg_settings and at the actual values of those options), a pending_restart flag of a given node is set. This flag is reset on any restart.

The parameters would be applied in the following order (run-time are given the highest priority):

1. load parameters from file `postgresql.base.conf` (or from a `custom_conf` file, if set)
2. load parameters from file `postgresql.conf`
3. load parameters from file `postgresql.auto.conf`
4. run-time parameter using `-o --name=value`

This allows configuration for all the nodes (2), configuration for a specific node using `ALTER SYSTEM` (3) and ensures that parameters essential to the running of Patroni are enforced (4), as well as leaves room for configuration tools that manage `postgresql.conf` directly without involving Patroni (1).

Also, the following Patroni configuration options can be changed only dynamically:

- ttl: 30
- loop_wait: 10
- retry_timeout: 10
- maximum_lag_on_failover: 1048576
- check_timeline: false
- postgresql.use_slots: true

Upon changing these options, Patroni will read the relevant section of the configuration stored in DCS and change its run-time values.

Patroni nodes dump the state of the DCS options to disk upon every change of the configuration into the file ``patroni.dynamic.json`` located in the Postgres data directory. Only the master is allowed to restore these options from the on-disk dump if these are completely absent from the DCS or if they are invalid.
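Taken together, the dynamically-changeable options listed above correspond to a YAML document like the following, which is what you would see and edit via ``patronictl edit-config`` (shown with the default values from the list above):

.. code-block:: yaml

    ttl: 30
    loop_wait: 10
    retry_timeout: 10
    maximum_lag_on_failover: 1048576
    check_timeline: false
    postgresql:
      use_slots: true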
patroni-1.6.4/docs/existing_data.rst

.. _existing_data:

Convert a Standalone to a Patroni Cluster
=========================================

This section describes the process for converting a standalone PostgreSQL instance into a Patroni cluster. To deploy a Patroni cluster without using a pre-existing PostgreSQL instance, see :ref:`Running and Configuring ` instead.

Procedure
---------

A Patroni cluster can be started with a data directory from a single-node PostgreSQL database. This is achieved by closely following these steps:

1. Manually start the PostgreSQL daemon

2. Create the Patroni superuser and replication users as defined in the :ref:`authentication ` section of the Patroni configuration. If these users are created in SQL, the following queries achieve this:

   .. code-block:: sql

       CREATE USER $PATRONI_SUPERUSER_USERNAME WITH SUPERUSER ENCRYPTED PASSWORD '$PATRONI_SUPERUSER_PASSWORD';
       CREATE USER $PATRONI_REPLICATION_USERNAME WITH REPLICATION ENCRYPTED PASSWORD '$PATRONI_REPLICATION_PASSWORD';

3. Start Patroni (e.g. ``patroni /etc/patroni/patroni.yml``). It automatically detects that the PostgreSQL daemon is already running but that its configuration might be out-of-date.

4. Ask Patroni to restart the node with ``patronictl restart cluster-name node-name``. This step is only required if the PostgreSQL configuration is out-of-date.

FAQ
---

- During Patroni startup, Patroni complains that it cannot bind to the PostgreSQL port. You need to verify ``listen_addresses`` and ``port`` in ``postgresql.conf`` and ``postgresql.listen`` in ``patroni.yml``. Don't forget that ``pg_hba.conf`` should allow such access.

- After asking Patroni to restart the node, PostgreSQL displays the error message ``could not open configuration file "/etc/postgresql/10/main/pg_hba.conf": No such file or directory``. This can mean various things, depending on how you manage the PostgreSQL configuration. If you specified `postgresql.config_dir`, note that Patroni generates the ``pg_hba.conf`` based on the settings in the :ref:`bootstrap ` section only when it bootstraps a new cluster. In this scenario the ``PGDATA`` was not empty, therefore no bootstrap happened, and this file must exist beforehand.
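For reference, the users created in step 2 correspond to the ``postgresql.authentication`` section of ``patroni.yml``; a minimal sketch with placeholder credentials might look like this:

.. code-block:: yaml

    postgresql:
      authentication:
        superuser:
          username: postgres    # placeholder
          password: secret      # placeholder
        replication:
          username: replicator  # placeholder
          password: secret      # placeholder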
shape="diamond"] "cluster_initialize" -> "cluster.has_leader" [label="no" color="red"] "cluster.has_leader" [label="Does the cluster has leader?", shape="diamond"] "cluster.has_leader" -> "dcs.initialize" [label="no", color="red"] "cluster.has_leader" -> "is_healthy" [label="yes", color="green"] "cluster_initialize" -> "data_belongs_to_cluster" [label="yes" color="green"] "dcs.initialize" [label="Initialize new cluster"]; "dcs.initialize" -> "is_healthy" "is_healthy" [label="Is node healthy?\n(running Postgres)", shape="diamond"]; "recover" [label="Start as read-only\nand set Recover flag"] "is_healthy" -> "recover" [label="no" color="red"]; "is_healthy" -> "cluster.is_unlocked" [label="yes" color="green"]; "cluster.is_unlocked" [label="Does the cluster has a leader?", shape="diamond"] } "post_recover" -> "update_member" "recover" -> "update_member" "long_action_in_progress?" -> "async_has_lock?" [label="yes" color="green"]; "cluster.is_unlocked" -> "unhealthy_is_healthiest" [label="no" color="red"] "cluster.is_unlocked" -> "healthy_has_lock" [label="yes" color="green"] "data_directory_empty" -> "bootstrap.is_unlocked" [label="yes" color="green"] subgraph cluster_async { label = "Long action in progress\n(Start/Stop/Restart/Reinitialize)" "async_has_lock?" [label="Do I have the leader lock?", shape="diamond"] "async_update_lock" [label="Renew leader lock"] "async_has_lock?" -> "async_update_lock" [label="yes" color="green"] } "async_update_lock" -> "update_member" "async_has_lock?" -> "update_member" [label="no" color="red"] subgraph cluster_bootstrap { label = "Node bootstrap"; "bootstrap.is_unlocked" [label="Does the cluster has a leader?", shape="diamond"] "bootstrap.is_initialized" [label="Does the cluster has an initialize key?", shape="diamond"] "bootstrap.is_unlocked" -> "bootstrap.is_initialized" [label="no" color="red"] "bootstrap.is_unlocked" -> "bootstrap.select_node" [label="yes" color="green"] "bootstrap.select_node" [label="Select a node to take a backup from"] "bootstrap.do_bootstrap" [label="Run pg_basebackup\n(async)"] "bootstrap.select_node" -> "bootstrap.do_bootstrap" "bootstrap.is_initialized" -> "bootstrap.initialization_race" [label="no" color="red"] "bootstrap.is_initialized" -> "bootstrap.wait_for_leader" [label="yes" color="green"] "bootstrap.initialization_race" [label="Race for initialize key"] "bootstrap.initialization_race" -> "bootstrap.won_initialize_race?" "bootstrap.won_initialize_race?" [label="Do I won initialize race?", shape="diamond"] "bootstrap.won_initialize_race?" -> "bootstrap.initdb_and_start" [label="yes" color="green"] "bootstrap.won_initialize_race?" -> "bootstrap.wait_for_leader" [label="no" color="red"] "bootstrap.wait_for_leader" [label="Need to wait for leader key"] "bootstrap.initdb_and_start" [label="Run initdb, start postgres and create roles"] "bootstrap.initdb_and_start" -> "bootstrap.success?" "bootstrap.success?" [label="Success", shape="diamond"] "bootstrap.success?" -> "bootstrap.take_leader_key" [label="yes" color="green"] "bootstrap.success?" 
-> "bootstrap.clean" [label="no" color="red"] "bootstrap.clean" [label="Remove initialize key from DCS\nand data directory from filesystem"] "bootstrap.take_leader_key" [label="Take a leader key in DCS"] } "bootstrap.do_bootstrap" -> "update_member" "bootstrap.wait_for_leader" -> "update_member" "bootstrap.clean" -> "update_member" "bootstrap.take_leader_key" -> "update_member" subgraph cluster_process_healthy_cluster { label = "process_healthy_cluster" "healthy_has_lock" [label="Am I the owner of the leader lock?", shape=diamond] "healthy_is_leader" [label="Is Postgres running as master?", shape=diamond] "healthy_no_lock" [label="Follow the leader (async,\ncreate/update recovery.conf and restart if necessary)"] "healthy_has_lock" -> "healthy_no_lock" [label="no" color="red"] "healthy_has_lock" -> "healthy_update_leader_lock" [label="yes" color="green"] "healthy_update_leader_lock" [label="Try to update leader lock"] "healthy_update_leader_lock" -> "healthy_update_success" "healthy_update_success" [label="Success?", shape=diamond] "healthy_update_success" -> "healthy_is_leader" [label="yes" color="green"] "healthy_update_success" -> "healthy_demote" [label="no" color="red"] "healthy_demote" [label="Demote (async,\nrestart in read-only)"] "healthy_failover" [label="Promote Postgres to master"] "healthy_is_leader" -> "healthy_failover" [label="no" color="red"] } "healthy_demote" -> "update_member" "healthy_is_leader" -> "update_member" [label="yes" color="green"] "healthy_failover" -> "update_member" "healthy_no_lock" -> "update_member" subgraph cluster_process_unhealthy_cluster { label = "process_unhealthy_cluster" "unhealthy_is_healthiest" [label="Am I the healthiest node?", shape="diamond"] "unhealthy_is_healthiest" -> "unhealthy_leader_race" [label="yes", color="green"] "unhealthy_leader_race" [label="Try to create leader key"] "unhealthy_leader_race" -> "unhealthy_acquire_lock" "unhealthy_acquire_lock" [label="Was I able to get the lock?", shape="diamond"] "unhealthy_is_leader" [label="Is Postgres running as master?", shape=diamond] "unhealthy_acquire_lock" -> "unhealthy_is_leader" [label="yes" color="green"] "unhealthy_is_leader" -> "unhealthy_promote" [label="no" color="red"] "unhealthy_promote" [label="Promote to master"] "unhealthy_is_healthiest" -> "unhealthy_follow" [label="no" color="red"] "unhealthy_follow" [label="try to follow somebody else()"] "unhealthy_acquire_lock" -> "unhealthy_follow" [label="no" color="red"] } "unhealthy_follow" -> "update_member" "unhealthy_promote" -> "update_member" "unhealthy_is_leader" -> "update_member" [label="yes" color="green"] } patroni-1.6.4/docs/ha_loop_diagram.png000066400000000000000000017655351361356115100177710ustar00rootroot00000000000000PNG  IHDR :&bKGD IDATx|u}/WI4MiՊ(9G[ ^.^眛wm:':dڼ!RPeMpi! h{|<#9=wmSywP7+]/@8#0LtDz?tG38#---.(UCCCC.X-7*]o}keIF0@8#03!@8#03!@8#03!@8#0wCINrVYI&%9-'眭: c@xI1UI'ِI%ɩ 8d&T^Nr?'yMORE !ݮ$3FiU}-޳'i˳$O9,=$&)w߁pPw+^87=Iޕ$w$=IpPLtD_L$K<h|$5GI~\3!`/t$$NxEw$[ȳSBF/ẇжmr577CU8dL8ZH֟ޞɳ˒|9ɇ|7Ioq~/`{r 7wܑ -rJCB h4%N 9_$w$Yzi/իs7k? 
`]ˋ{8&&&,%շ˗3rH֭[t):s?6oLNNEEEQˇ"lKIIERoB$I$I$UiP=$I$ 5n_㷨X͛7} VjB=&LUV\)Sp7]ꂋߺukTʂpϠ.]ЬYg*I$I$I$@$I$IMX||<$''Zyyy^I^^7n/۲e˨ v $%% kڒh"}Qbbb۷/7t\sMХ7r~>a R᳡k׮p P$I$I$Ij| H$I Gmuɶm8x`dl7y$))Ν;Ӽȑ#9rde ?%I$I$I_$I$I)}$--mOnnnTC/:bѢEdggSXX5ojotm6<G ֭5eI~Cep8\ρΝ;Ӽ$I$I$Itdk$I$IR㉏'99/}dzXh7nQc:y$99ցIgY+;;={D=2dHGٲ$I$I$I$ B$I$Iv9BXt)yyylݺ:TܥK5kV_SSY۷o82|K8}dI$I$I$I I$I 5n[PPΝ;lP}dÆ EOHHHBBݺu]v5eFZ޼y3v[*^Vz'&&,%I$I$I$n$I$IjZlIrr2ɤָ}MgffEU-{Qj~rss wΦ0jaJf$I$I$It<2"I$IN*//b 6o)..C^rn݈i94pT]c֭DƖʟ#psK.4k,J$I$I$IRf D$I$8O|||BM:T(>͞={Ɩu:H};5e7nQc}]0C 5NHHh$I$I$I$I$IR6j#嗳Xti;ITܹsg7\k} ؘSe]<"I$I$I$I )$I$IR8#'77Bp,-ZDvv6Q*t,DBs|6mbQc6eŰHfjV=/ $I$I$I$ H$I$)pqqq$''LjjjWزe ˗/gҥlٲ]pB*$&&S/s.((`ΝՆ;ʖsrr(**_>S, B-*m~kxe7Nz$I$I$I$n$I$IRO8&׸޽{پ};[n%'''jՋ^˃_r#עE :uDΝILLZܹ3:uZ޽{7۷o'''m۶m۶ eꫯզM}%''sөS't9~Nر?]p]0w.0dq\|1)D$I$I$I,!$I$IjZnM^իW_+>s«(**"''nZicŊ傂* pHz.ԩIIItܙz}&ޖ.1cwo2Z6I$I$I$IR H$I$ p+pWE7oޜ$8p`/ٲeK$DҮ];t {4Сի׿ik۠[+$I$I$I$/!$I$I:,g9Wr%Ws5wqQ]vk׎N:k 3{w3KG?BI$I$I$I:5($I$Ipq*w4|:w@VO~=  9PTt$I$I$It2"I$IB>\%|G\%5-Zرd I'C]AW(I$I$I$I!$I$Ijr5a! I !C`*b~K;H$I$I$I !$I$Ij2yW7r޽a ؼ42~iQ ##$I$I$I$3"I$I&)Q9?rvJdeBv#` 3 P$I$I$I$!$I$Ijߙ$3Y o ~uA0}ziPD$I$I$ITg H$I$IZ .r~O.Vdj0~` 7ʕAW'I$I$I$IMI$I$59;hF3<3]W/xAX~| #`B() BI$I$I$Ij H$I$I'Q18.Imĉb̟_nh8 = O$I$I$I!!$I$Ij2J(:c5yDKRy110j:,[S@Ϟ0}:t$I$I$Ih$I$IRq7wWs >矇+$I$I$Ic@$I$IJX2.t $ -Z % W^ '3f޽AW(I$I$I$InjI$I$5:Ә\W)t9 СsBF4 6]$I$I$I$;!$I$IjT23f0O 6} /@^0jNI$I$I$IR1"I$IFc)KkT 5$ۗvY}v!C` 3P$I$I$IꔁI$I$5 Ydq1_]X;}>뮃=`t BI$I$I$IB$I$Id'r!㯵TiiAVa nt$I$I$I$5b\$I$I Z!e,{ˋHkZ]p|6l !FAFFI$I$I$I1"I$Iz>^%5Ԙm f ̟0bq̞]z_$I$I$I !$I$Ij~w*] ~)ЫLAW(I$I$I$I52"I$IygӘ 8^zJOHII7|3<-[ҳgOzꩨN߾}iժ۷gĈ,Z(- ́`L&LˏI Pb~ѳgOׯfͪ… 9iݺ5[Ϯ!I$I$I$i3"I$I>Z%tnFy*v̙L4͛j*p-}v{9x/^k vڵkIOOg̙hvjJ_UiSN#`B()O<|222عs'O>$>(s΍l{qr7n:֮]˔)S?~<|A'I$I$I$ _2% b.sǸK$ɚ={6'N IR@ֱ938,(,,W^kFͥO>ZΝ;3aN=Tn6/"gώtQh۶-6m]v|njDf̀^}ad8PP'w}Q_~e>{=.RFɓ5k?VuIT233HKK I$I$IjBseܸ"I$IKdH&xf4 66ɓ'WQFѹsgyFy'|߿n66mTϳQÇvY ?z,?^{mufՑ˖-cԨQ=ztb[I$I$I$I!$I$Ij )r.g';yiC'Nȼyرc7aʔ)mׯP(rС[llϒC>}׾}^|El*v<[SÄ !zYa]BByyy۶mҥ [n:$I$I$I$5B$I$I ͼ;g>V;reO/șg٦]vl޼[qqqd^z /k.}Y<__T֥ L7“OGСP<;\115K.l߾۷xǖ$I$I$I8$I$IR!f3?grtq 9s&7tSÆ _18S8q" s=98W%K !N>f̀{gq .~qrLI$I$I$I I$I$j󸛻yGØj0`'|2ӦMcʕ7.iӦq}3ϐ޽{y79rdd;?lܸ"n#6nٳK;r  BIIJ$I$I$I: H$I$NʫLf2s?{A#5]qqAoxuHH1cओ` ط/ %I$I$I$#!$I$I3Yw.{]t|` }^wɐ^ID$I$I$IRc D$I$Iub+[@ˑOvY4A0n^I$I$I$ICB$I$It.!X18.I:u wYY䓰j  CCqqJ$I$I$I:JB$I$ItTr= ^e:1$i&LO>%K 9CQ IDAT]AW(I$I$I$$I$IQ;xxGˑTCGHO냮N$I$I$Ia2"I$I#O0[~0]f̀M`|a(x$I$I$ITKB$I$ItDLI} _|> ]D 9s( %I$I$I$U@$I$IJVr)r 0iA#hر;п?\wtӧΝAW(I$I$I$B$I$ItXvь&TB.IR]IK+z5L7}tu$I$I$I1"I$IZ+ьb1 $IW/xAX~/T5 22N$I$I$IB$I$ITK%pױ,d!tI[۶0q"\ CAgޗ$I$I$I!$I$Icy1TR.GұS!3N9L=atر™gԩPRr,*$I$I$I<!$I$IG߇c[$I$I$I$I$IR-&1!r$5A6nٳa:._B楝BG3 +]$I$I$Ij H$I$JkX\(Fq?]e ~  O3J"K@QQkpgOK$I$I$IX $I$IRÔK.r!=3}B"eݻӼ$I$I$IRP$I$IRB s=dtK%IjB/,kᬳ`bݻ^#++ۺu8x a0cǎ%ӿ  'P~Ob˖-ҭ[~N:$ڴiS/s$I$I$IRg D$I$IQ^_x9S.GRc?̚UNa!l ? g,_+VfvT g > Vc=e*C(o$%%s!I$I$IチI$I$Ee.qF]O;kmQogTؤJC}{ ..]IKKcرߟTwNՅEϦM* ,_>* azILL1$I$I$I&!$I$I`)Kk[ˑmW_U|,6bb4 r`ws|j򑕕)..Çgĉz" H***bÆ EV^͞={hѢ)))EO||$I$I$I1!$I$Ib-kpˑ~{魰rsK";vwzrWdϺulHmI={h[X [\|޽{s '=zӼy*"yyyvI +++ݻWׯm۶=S$I$I$IR=1"I$ItN.BRHa.siFKȕ-_{nbcc֭[iP߈ -9dj:Y4L V"֭}T."I$I$I@$I$IqB1=! ZؒjVXXHvvv>{G׮] ä1~xRSS уf ե"`ƍEVXA~~~dUEzILḺ$I$I$I` D$I$86)BJHj@ ؼys.YYY_b :@0|p&N߫W/BP3@-y׸,,zjGJJJa,$I$I$It3"I$Itzx|xT. :Zh9*W?/#%%"v$I$I$I;!$I$Ilj/ь# J|deevZJJJGY'ÇG.۷/ڵ xR%$$0tPZᱪpBw$I$I$x_%I$IEv>ӞA$*눐UmG;"x@ZZiiico]eǜCov̑$I$I$IMI$IRt[HMցظqc VXA~~>-[$G|L81rz=h֬Y3O8fԨQQU`ʕ۷/J$I$I$II$I&'x7̠ˑ֬YSGVVV  }z|׃z=Ƒ? 
Km˗/禛n>૯j_hk3goR\\L߾}2e \sQwhѢEﱼ<<3޽XuVigO>֭[U$I$I$Iu@$I$IR/qw\%A#5eWu+,0|E}]v^RRRoC\{~+,Y#Fz̩ο7y0`97x#'?c'$$FZZZǪzgddv{|ث|w>}о}z_$I$I$PftB\1.R$IjfϞĉ.Ce,<|۠ˑ͛7Wcٳ(6Ri,*W߁=F}KAA͚5;Sױx}?Yf ֭[:~6mTHu K$I$I$I ;w.E_P=$I$I;cԨQtޝ?UVQXXȑ#IHH`ܸqآ".bzABB^z)}Ys[>gXZZ2j*v2WZZJcccHW^۷ bcc.z6O<PPPmƏOnn.ݺu#77OO̜9I$I$IΈ w$I$I:R.=lO@:UTT|+--L:5/n&:,{=)**bڴi9.WK.a̚5^u6ϟ}:/" ,`ĉٵOγ>KFF> SL!++Yfw^zV׾5|A^|E냯;Cvvv5ҥK76jhh`5c1zv_,::%-O5wYx1s̡2F߾},2d$I$I$Iq"x2C p dItH~iN2$TO=Wp>䆻$u(.. **~y1ILL ./OLLd͚538WRRBNNNn#Fp]wϚ5o|>8p ۶m밾CY-m;e9V{1c1/BLg}r/K,Y̙3Cfرӡ`.B [~=yyy={6_Bg̘ҥKCB9 >l}Yя~[orڵkywO~BII >'?:VRR|[rrr:t(Æ #770Bŋ s%$I$It ̙3k ~@$uI>!t1_ c]tPXvmDX*bbbj31tPut_?a=:A}>!6m"==Bm:mJJJ8ظqc;qQTTǖ-[w믿ΦM;@Hg8}wjmttt}nݺgϞ}8sppUK[lNcڵlڴ.BA=RV^r[j۷xrss2dO>dH X1"I$I$I@J$I$0=;nDR+III̘1իWS\\wٳ_iӦV۶mk5w 7… 5ƍIMM=cvl߾=Xs[ơ8kﯼƍݻAѻwoFٳy';[A$I$I$It]$I$I'|5\ÍrrEqEPWWڵk)..X*bbb"''mСt-[֭#++T1~x;|܄ xW뮻BzVf$'' ++??_|o~C~~~/`⊐w}x nƐ7L0!d@nO>sұ}aÆWJ*7Tc*IKK#,\rfep`g'1"b膿#%I$I$I:$I$I:md#_+DEEmh)..k3,2x`喺[n 8;v0s"OرcILLdҤIDFFRPPVwyw}|%##kcnff&#F&֭fܸqу|+͛nj3X`WB}}=ƍ#&&wy[oz)LCCc̙;!3lvezt޽=@ … hǛ~SRR ͤD WL]l  좸i8F7b!x"D" =)$D$HQ$@4t;ĶZ+x#$э8kI$I$I#@$I$Iqj{ˉ'Wyb]tTG^^^cEJKKihhVX sC ^L^PPO<^HUU\~>ϳTS~TQEu92(:^"I$I$It 4ړ]:%@9 w)$~iN2$Kk+,}g  $SSS w^HvvIsѺtMg̝;ws$d∣;I"8'D#DI 8AI%HzD$=/^ fS$I$ItpskB_C$I$Iq -2 "#&& eee߲Ȋ+ fXdСtXnI|?xg]S]]MYYY.ŬYz 4`ԩS rk)g';f;IcNvRM5Eenvd'T@CK, $4C#$D=&I&M#$I&$9pgI$I$II$I3Ŕ_n!;`$%%y'H"IiGZsd/{h2 ) v;4eTRV"I$I$IꐁI$Imƣ<鄕B^^yyyUWWz6)`ۉ)@ǧ u]>VZEee%7GC !>ޮ'nM#2h; xi`GͣJU 9m@BHH'=sF$I$INtB$I$IEq-rW ǰa6lXc555_"EEEݻ7FFFCmaDFFmI'>|gٳg0''ѣG3uT?:,M#Cz|uH!J2XƲZLr0V`WJj"$I$I$#B$I$ID&,f.*&&&xQy[***Zu&(((`ʕܹ3FVVVEN;4w~,$u9Ք,ۥ'???$au5QD98!AV26QEU4om$I$I$I$I.n/{+$WxXb]C˜1c3fLcmv5(((999 4=z-IGE{v?~tJjdwqǶc+[–SD[V"IϐHozFp z".(4>B;$I$II$I.F`+xMp$(III!//V눰hѢv;"#:w1eÎ9%qj E$Iٗ IDAT#a|9{3ڐ5zғS8&TI~4RI H$I$I$Im3"I$Iԅ=+ fpˑ&qqq'Mr׷Y>cϞ=5222.ҿ"##ñ5{ol1uTߛRC M`,b"H?3=&6le3)0xl [h1(4H'4 S8L2$mK$I$IRd D$I$%?f1/p# ^<ߖ C:1,Z{J۷oE B||ܒ#լ^Uv{tKOԦq !L9lig|F6#qч>!aS8t4E}4,I$I$IDŽI$I.6n{|1H:G^^^ca\EHRRұޒ'ͷf)))G~~~=3h zH:D oH#lneQN9F9|ʧlf3@U!=id dA_N:}KiDb"I$I$I]I$I.s> `2yG]XGajZuyǏ6|X]DCVVVHAnݺyN65/61e`Xd3Y’`בf, ,$Up;ݏV%I$I$MB$I$Ile@f1^H-)ػw/gԩDDDy'thӝܦё}죌262ֱ2+eFj >&d2$,IH'L24zhoQ$I$II@$I$IRQM5L=%uY@[***a"/fΜ9TVV۷oE B|| G{?SI:Yˀё *(l ~-2O)MlນdC餓A9-F_Mآ$I$II$I.FnE,N wItRRR#//:X{JJJhll 2]$77plK(Q\\ @TT }4?x`ü I:4a k=a-kY:6RJY:̟YǺ`H"I'M_K_ɦAc=I$I$I !$I$I]C<\q:I:jZEo>6lЪK/Ě5kVg @ 8:bXvma+VPUU@LL YYYm>Jn¼IRw3iF61iyYZ=x~2_04M6@9 `I$I$I$I" H$I$lf}䇻I `a["9wfw~,孰j 4ԩSN$I#@1Qm]aM* 4Zi&XnO$I$I1%I$It{w[]$uYE***Z+/^ܹsٱcGp}YSO%!!ջ=-o444> FzzGt|J aM-5԰7 (`5)$I$II$I0YjW7-Hq-%%b1yxGK_F0"$(қڂ$I$I$I$I:d3.vq_wp7r#LcZK$I$d|hVI%)ƯPDF2S8%;$I$I@$I$I!(wxƦ1i,bq3~%I$I)$4f["<˳N"d"#KI*_$I$I'8!$I$I(F\H($I$RIl;XƲ`HAi$F3<,"$I$ItI$INjgx&iVO=E\A?BI$IұLrN">g{˼^ғ|/2Qf4p $zI$I$ H$I$uF6y:V<|F0W'I$I HcrӀrE,d!I$rjhF3y$I$I H$I$uK$I$X3"I$I OԇEA y<aN$Itns=Ε Y#8Dp&gO>\)$I$I: $I$I:HϬde\$dɏWsu*$I$,Nn'BR@?Dp"r!=%I$It$I$I:H DSK-DI$s?qą/Va͚5}>]K;5QD{:x^جN=W[Zultt,!!Xa$bbbHLL$>>Xξ$IG_rKӨO "H&1|9 8’$I$I: H$Itر[[sڵ+dn׮]۷Jٻw/11VGDD/OMM%..Hz<[nEbbbȱ.-5?gTAۿI.+ពU%ǚj>ֲ KYYY~sP9g4G)))ő@=HNN&111\JJJHI$IRI$yM~g [X (`&3yH# L`xIwْ$I$Ijb D$Iv޽۷SQQAEEE***عsg0|Q~[IHH`k~ǚA:Đ N]SZvŮ]iLIuu5UUUܹҐ Ν;; ŵzٓRRR$I$MonO$/4:"d#}wޤӫWêY$INtH#1c/I$q1s9s1LrK$I$I:y$I$Ka֭lܸvnڴ)@\\)))dddNzz:C owdddELLLw)#$''3`N=n޽׭[LJ~HYYYXll,={ $I$8i<#aM0r7 \\Ε\I:.Y$I$d D$It׳a֬YCii)k֬a͚5n:ټyscӧYYYӇEhDn!''666 56lƍY~=~)ϧ۷/iiideeѯ_?eggӭ[MI$Irӟ;Fkp'w2\U\ɕdr%I$INB$I$IGLMM ֭ =~_>cbbRgff2|pӃ, zH:*{w 6sٴiׯgӦMȆ Ϛ5kعsgHsP$;;x'I$IaB _oT&o/qw1\La r%I$IkB$I$IR__Ϛ5kXr%˗/g\+Vn:Я_?N=T&L0zӇ0F,--49m~m֬Y֭[ՋAqꩧ2xmРAv$ItB#IMc#%^b&3yG9sk_#p+I$It1"I$Ij֭[+V 
Vb߾};xAE]Dnnn0q)yt줤™gݻwSZZX˗f /$$;$I$b Cj2y{zJ$p+I$It\0"I$I',^ŋSTTDqq1111dee1tP.2rrrOO>a^ 6aÆ:V[[˺u(.."/_)))H^^^vgH$I!M) v;xjJ{$I$Ic D$IN"|',YK'PXXHuu5ќvi >nÇ3d.]NX]~~~ȱ VXҥKYt)K,a޼yTVVANNgy&gqÇ3Ϥ_~aډ$I$u^!?'/?0i\U͌a oT$I$I-$IXYY-b…,Z?6lGfԩ :}: Ӈ#F0fF͹KLLLv I$I/$nn39 s?!۷o'!!#G"F"111K$IҁƝM"y~xG"_Fnz'p*I$I6B$I$8V^^o{ 11#Gr-/$99s.Q/>(yseqy%I$cCiӦO>};YtiK$IʷEqgqwA?W%I$Iq$I$MK,#++GyѣG`֯_OS&L@\\\lSccckcc#,[oiӦ$Oks2?=?xਭ$΃>ʕ+y>|x0VhJ$I+XOn/]6ܥ~˗ou]ǰaØ3gN˓$IE/ͷ)yL2&xlw$I$IGI$IBǃ>Ygo_Wos=V˖-K/%11D.b-[rN  g1j(wϪU(,,dȑ$$$0nܸ6/^l'N >ĉCy@  ={v|g팃]o޼y=8ju^aa!'N$!!$v/NkzjJRRRZ&2pcƌөJKKC~Vb׮]!sm>o~nVm#**@ pX],X{g׮]$&&|{]YYwMNNqqqs7vX_sM-k[BYn'O&114L¶mZaʔ)꫇K `ر̞=A IDAT"9z/SRR.!P__Ͽ뿒Mll,'hu~;FE||<5?aZ$ X2/#_/p ^K$I$I:" H$IRc.x f̘_&Mԩ~!!sVbL4bJKK2e 'Of;5L>g}72|pL?̬YذavsOKꪫXzuB/UVPWWGnn.۷ogر\{ѳgOvթzVg֛[oň#ZͷyFNcGѣG III̝;78WWWǐ!C裏=8M7Drr2kZ#GvX{mllg 9[nncƍ|GpÈ#x7;SOwߥ#G,Kٻw/l߾??C:|y睔RRR??I$K'Lg-k)?e998J$IW+$I `sk]$I'~Sc|VZk駟@ hrz->~p7ϝ2e gqw_/Yd 3g Yw\x2~z(//ys9+9f̘ /w]233;|8G?oV5 4{v;܁מFyW5j<<,Z[a̘1lܸ X[?D֬YCϞ=s%%%pxW N]q?o='x~W^yXU.Iaxb\$c~qr/_$I$I3g\zI$!$}'c cԩ|Om]Ԟ?O~~~|>}߿|yy9ƍ(dݝ;w-Pj.**>GII w7n`ʕ\wu|Gsٳ9;]o{M%?VmڴC~@ ݻ޽{_[qq1<379a̙,_g\z~\zгgOˉn4h> 6l// ]t<  `ƌ,]Yfu9'L={G~~>QQQUl߾\}}=:?555dddu=2rH7o^ˑիWr"i ,\}vZ.JKKQ>B$ue+Y<}ڼX9oݺ>}Z+===Aѽ{w.] 'z Jrmٲ[n,5ٽt& /f׮]޽㝩gܹٳbJJJ8qb_UUթđv%;WkewkyK{qٓ .3gR[[{ص DFFlyh[TTDtt4soyIdzVs)))TTTs):/--M6$It1d+:_g3A H$I$I$I"N||@cƌ$1B|O?vo>jZl<׷^c k 6AAA:tPrJ1"ҔEZjР`Zfnjt)ĤIrJ;v̶m?u8 OmO`mȑZr>5k::\zz7ꩧRTTㅣEg}VmKŹ|3F=z{ァ?OڳgypW^ʕ+5~N fY]v+ "-Z(88XW=g7_oɤLIF/R_Yk;[.~p@Еj޼ybt9@׽{w}նw,.zLh-(Ei]@5͌.ǧ/YD җ_~+. wպUºvvnݻWzz:99GSLS7pm?{l<oQ&IÆ >tP覛n6_Dyxxwѵ^b;gkɓ'B5|޿\7o-o쒒tW_~Qbb. hzŋkTdd,XzKW_},䥙Tj6zNϩHEF SeeeE@cbI˵\4Rl-ZTO?{9uQgСCh*>CZJK,םwY p1GK,QV.圥gҥK5p@k4,8~(-gu3k0;66JI-B=g-o='uR3534p3LZ|F_L0ɓ';wjƍꫯtRиq㔟Ç+믿Ǐ]6`8ɤM6Ӄ>Xq[VmIS=eB|FΝ;k˖-Z|+ 4Oy=}ڧ꬏*Eh\z|LB章@>W^^ݶjtqqhuA:uz!5Jz#gggf/>>^'OVn5߯c^ʀY~^}U˨]v?oW^5|p992`Cݺ[qh+^?U].4!B4Y:zhۑ#Gt=zZ쨨v<777[jb;zӳZ,X{Oo/^@ >\Æ S>}q1/`TVm6kL٢E.*++e}Zbvء 3F{:vhtРU[}I4MC=tzZ7<pY(** u>|aأ녅Վ#_~~~ 7f0/ZhSjԩڻwVX+V_zWLL p(;;[?֬Y5k(77Waaa>|.\x:P@=]QB+E˵\5[5YX."))):;VcԩSv0Lcǎv3o...ͺ5k,͚5K/BO=O.///]s5SllOF 0@jja%%%)%%Eڵ> >^=zPV.pk֭ڰa6nܨ7*??_޶`\\yyy].8}4zğΒ%IW]4M_kݧCC[1LZ|F_ܴVG CաCСC׋ƶlR RHH*+88X7nA3l\޵kWM2Ed'/\PeeeUN;*66VݻwW .?SRRsN%''~SeeBBBYf)..N={lr^Fk>MgL/eY64p!Ι<uSVV ƺuҥ `-44T.L-))UW]u$*44T$?~\۶mݶnݪ{ONRfԾ}{]y啺+յkWo^rrr2x&4;vLo߮[_կǏYѺ+um~v]6JPfiFinzM)\F.B&DG zڅ>233UXXh7j#44T`G}BhxO~;CYfR Wǎ#bwReffjΝJIIQjjMwvԱcG9R֭<== p6fo[#4B5QY P6K2L[#77WQffrrrt1͚5SPP:o߾ S˖--[*00P͚ MD?>~ڵkvޭ={hڽ{%Ij۶:t蠶m۪M6TddBBB48JKKӁ~޽vPEEv+аaԮ];mV2LFOpvhfiiVi^ի2ltibeUalĉ1h c}VzZhkF\s edd2ڽ{֭[ Kh TEEE)**Jrrr2bjp;v[)//϶o˖-նm[o^7pqtt bk!;t:[F!V*))-TzzӕLɓfhGZRDD&g ׯknZZ߯4۷O}222TZZ*IruuUxxn -Y P:xrrreggnmcBBBlT 5o%hvh&&ݤr uG JJJeѣj4>}ZRݺub=3j׬Y3ۂgG***l ~r~vvmۦ\jF !y8uRSS-nOKKӁt!۾f1114h-Z8prrRxx_~yyyv!۷oWvv< wwwff[Sp ~V)//VyymgŕW^*44 3\FjFhUzOi]hga]vۂtlѵkWObbQ6mX@ ԃlVllbcckܧX999Çuȑ#:|>۷^+)) EHǎUQQ<;vÇv'''( @jٲ"##գGe˖jٲF.hnMzFINF0!hJKK})55vo}|IIԦMuA7|-Ѻukyzz<iqwwW֭պu:_XX-H 8`輨q7o^cXGl=#gg }9@Ǐ o5> aX+,,hٲl99hx]o PMtm6Bt@piiڳg~wٳG{QzzN>-IjѢڴi#ŢAٺ{X,h|||㣶myLQQQ] >?t~j v+y涐|}}#>>>f\]])///O𐏏\]]s."@:~N:竴T'Nɓ'UZZ<:?0ae>:ڡ7)U?Z]/mtY"JVVc ~X*--$]vj߾h[hH<<Ф=@8Ph+++Ӯ]۷oWZZ$W:uRΝ5zhuA;vTpppݢ[,gݦTrK. @KZZ-a ڵKeeerqqQԩS'M84l)7iFLezSo ":Vrr?ÒĨo߾z; }4\\^1$`!T;vЖ-[em޼YN<)Ĩ[nz'յkWuYF @1P\5B# ='. \bB'N_͛m;vLҥu릻KݻwWNjt4yC4DoMݡ;'?MTK&ڵkm_~EQΝuui銍Ult5^ЃzP! H4$p۷O6mҦM/h***bcc|PݻwW֭.K=?{~)6q=:/,} @S@ 2QPP۷kÆ JJJҦMt!5kLڵSllFxuڕp6SJR*Jwufid!0-^p W@KFB)))JJJ҆ qFJuUWGW_nݺ=Zz?B/M=@-zFkjƲ.k?@#PTTqFmذA5\qƩgϞotpy3Fz Yf}OK4K4O. 
\DBBmڴ$))I V=4k,ꫯ).N */9;_ںZ\+/Kv+^4EB HOOڵk~zmܸQv풓:vx~튋S֭.I;VzE륥tqZx+@F.!ڵkm}]W]u8KfRi3FzyǯEFJhշWS}jt9" p 9rD?6lؠDm޼YNNNڵ?+!!Aqqq0T@U]JmJ{owuÐkin-F.0!'O?)11QڲeL&v8=#߿|}}.p6IsJeeVZ*m\M@ܠ4Q5ESW} K \?֬Y?ڲe*++եKWgV޽ctVi=7Ν+0&ZUzBOh],YFk֬QbbeX4`=ݻ.pڴvm**fͤ㍮ /y=u&j% @@YZJڼyYf)!!AF Əf#R^.mtE@ݦ^zX;Z߿_|_@E zG4p@y{{]&b5J_x|Rx`I4Oq* 2$pR駟jʕJIIz=8pZnmtK-4T֯njzVcY7$%D 4yeeegiʕRTT_~Yruu5L%TPP,eee);;[tqhggݶM-,PHHP]6P'hnԍF$:uJ~>#}Wǎ5vX 4Hqqq2d,ܔPNN233B999ЩSlcPWEY}\UTT THH"""pm 4b$:ih2@ 4G՗_~UV髯RQQZ͚5KÆ S.pl(55zӧOƘf(44TٳbQHHyyy5o˖-JMMU^^mWWW+44yZj%oo~4=m&].!vQ}G$777_/KEii9b83777h a(**JUllVLLLTZΝ;tƸ; TyݠZZjt9<'Nhʕ_EC ъ+4`5o5,ᨛG]z(66Zp"$$D&,,Km$%%Q3DDDbOTM}OyʓYf@,믿G}O?TEEE۷x >\FMJ]zdff6j8bQDD\\\ Sn#vIMMUbb222TVVfsuuun#M˟'ݯ>ݺr9" mܸQ}>=zT^{~i3F-[4D,եǁTQQaغz4T6rf'11QUeemlvn Ş".oyfݬ1!ٱc/^e˖)''GzG4zh]4Z%%%:zu[FF?nԻz4Tu6r1_6ҪU+5k??4t(e)Ka 3p(?~\˗/כo~Imڴф 4fos1kaXgzyjr!X۴i#??K1Mf,/yc}jt9 Zrr-ZKLC ܹsu 7d2]jW3mjʔ)۫ymD#VS-%%E{wHyyy]yIvk7SeC!h$L2ikF] :tH^|Ejɚ6mfѥhd-Z &]j[})??߶g~Zu[VmKq>!ŚKCF )77W6mқo}'j۶ݘ?z㏫W^ruuUNNVZ'xBcΙ3G}YkNڲez!mݺնdR.]oSNڳg&O'j6RS@n#hh%:e?ݧ.8`2|re~@\=3ze6E'Nѥhh:! Kz-Zg}V[n$)33SÆ ڵkկ_?1}vlnBmf,"х$..NdFhJa;4>BqȢit)! \=^ .lּytw\@5-4nRAAm -bcc>"##e.͙3G+WTvvf I&ꫯum4c mذAΊ5`ǘL&Ifg[o$iǎzG~zIR\\͛N:U;޽{5c ]ֶHz5e}wj޼맗^zIuh.5]?\=ÇoYv3XRݮ$ܹS3f믿^/R5Uc͜9S?$?V}.&Ld͛7OsΕ$͟?_7!!:u&cǎv9z`4٬Zܟܫ|ΝJLLswwwzh{:atz+,,ԋ/Ms/KPuꑖӧOƜ#66Bᐐ7e~uQ~~~JII<={a=z.\^z8qXJ_GRusݻWٳoIWСCn:ל9sM7TyXs=z+//OӦM?zNͥ6tP7|B=#zoX{mǫzR뮻To>x㍚3gm5k[n&{oSO=eWttrսޫYfx:[m}jWVVV֕l:Stt=z^璤I&)??_K.T9n8CӦM֝wy9;VW]uUז-[͡ۑ]'O*((H'N͛7W~~\]]zl!?,Y={jРA:tu_O?M6u:beggWSwMFFlcm$22R Crr$`VjѥL&/_Q/z"jʕ5kRSS5e=v B"@ͬ]=ukWG rq᝹:u׿*!!A͚խspp6oެzmS[M ?HT߾}bw'OyΧ.s;v]7yxxN'Rv>@/KRݯgM*$$siիW/.ގ횞8qBvG}Tk֬Qnn]|ڵKӧO׷~k HuGqdk!!!:rHzuhժ7___r-[tI?^SN;Smo8 ,٬9u222Lﻬ,رCWVnn]kGpy=z5g9Bg@Ôx@=zPii6nܨ˗sk߾}JMMɓl23qׅdRIIݶcǎ]ڬt1UVVV:ujoHƍ'OOO%%%6qI1B=:udZ]g@@rssѣu! UNNE! f{>p@}7uLOOOη<!Nee}]͜9Seeezɉ)` =/**RNNRSS}RRrrrfl6+$$D{b-$$D&ROI1LPxx4zhׯNV}QbbƏo۶m63F))) Vzzڶmk۶~sȀkvM6M7oN#qF-[L~~~mt8qz;î:_>ӴioTCBB>=Cv?S%$$ìkoV۶mm{衇4h =[}'Zp;I\W{6k̮$:Wnn*N:_~vkhM0A?~͝; b{_ii9bBdeff6Ga, IDAT]faXz%OOK1=ܣ^xAK/tօ_5|pkվ}t]w8{L/a_:?GZvneddhΜ92dN>}U֭ĉ+]B5ő^zi̙zt͝;[۶m?^9sO>D=3uc۷|||4dL&}?s+77W?x o[ߪU+=3JHHO<޽{CZ|^{5^ƍ?N:Yyw}vd2)))rW\\jcPYYmjk"##lh\gճ>.]vjtYΑBCCvqZjFΑu֎ w:~4l0;szw5c %''+00P&M_$^дiԽ{w:uJ_^ujj?n6eeeUVpBTT>c=裺{T^^;_СC)խK4[\d=ٳծ];=ZdYh۴iTRR͛;s]bի5c M:U&IzҥKSmgj۶V^3gj钤뮻NWVttt׵g:&uMvnVU7h Y,ׄ TPPO֭S6mlZxƏt9;;C뮻4ydVTT Uz8Fe:bQ\\8+.OJ@dr-(2u;wN'10ܢE4a_^^EmYYY*((-Zm$22R^^^ 3CB*//9Ǚ?CCCEJNNZCYC}hp%ɤ˗k(t*++w=ӊ'|v]l6+66Q3΢~߾}Ϸ_5 . O>5?%%UvŞ"޽GYN"$pt5" ruz\mQ*ZkUʵmѵZKZEb%(ZK9s8$:&' |pq ss?sou;tشim!I$I$IINL>C[o1vؠK$I㖑AFF999 sJKK^<8|p|t2d!{$s!m`RJJJZcСt=I$I$I@$IO^z9]=6nȑ#G;vlϜ]=$I$I$IRGa DNfڵL2wuYW$I$I't9x ۷oO8˗i&ߦnׂD]G)߾j~>QTTĞ={4գî$I$I$'$u".SL3䗿%vZ%I$IHII!;;;>q!nWXAii)7on#pTOcX#{J$I$I$)B$I$꫹ Yp!iiiA$I$I:N'Haa!{ochcMcA߇$I$I$IOQ$I,XWUf̘O>D I$I:vDa;3\w>ˉFmjvIc+QI$I$I$u|${gn~aBP%I$Iڡt9t۶mK8񿠠6EÊ[ Y@HMImfdРAtҥf9p;vhGAAEEE>|8MݮHknWI$I$IOd$I^xnfN̙t9$Ik׮bo6y>B*L<2Q;GqQEl-Z+DŋٸqcF:N#l3QI$I$I$&!$uP?ϙ1cwus I$IR'PEkXC~;aD y>OzR:.juLϧݻwǷ}n#8pzv2dL$I$I$I-e D7od֬YA$I$Iǥx]",1y ?nn# LؽfM6Q]]ߦv#ڵ+}9QWAѣGG%I$I$Ib D#^{-\s .G$IS2G:Ӈ>\E|2qpDKKK#\9FJJJصkW|z0,:t(ݻwo1B/5k߽~=,//+,,*MGzH$I$I$!$u \ve9J$Ide H>X-EpI$]jZm$Qtu$33OS]= 5^۾G #7)SIOO?qO$I$I$I:%!$u+O>ˤ]$I$5@j9sas8RI 9F>͛)**bJKK)--B&55M^(//r9__~dff2h 5j ++Aivtn FnD$I$I$II:[oBѣGH$Iڑ 7y&Lf2> vK. 
86ޮ]())R)++38#0`@<,Ң7<lSʕ0|qL$I$I$I'!$u?yYx1Æ I$IR(c9#,Rҗ GxILbxٓ={rgƏSa իu%I$I$IN@$I|wm.Ҡˑ$I$`{x]@'42۹H TDz::w\{-K$I$I$yɒ$I3f w.G$IFb ky(QF00q#ԠKΆW^ ߀g "I$I$I$uB$IjǾa^~eʫ$I$uVG8|,g98@0"d&s1tj #G…pwtE$I$I$I HNw??Ã.G$ItP߲iLd"8P]ܹp=W]$I$I$I9!$Cz\||A#I$I:xxd99„a6!KU{ ?^ ={]$I$I$I!!$3<wD$IԔ v~.q|A-^ )w$I$I$Ij'H$#v?!wz I$IR[ۼMy_t;c=C#IPХ0^y.Mx+$I$I$IR;c Dv?r-A"I$Iwy7> DZ%B L+].UQn.<xd8A„a6dzdzu0=zE0z4\w?K$I$I$I$I+W_M( I$I (@$L3'x˸ L  “O]$I$I$Ib D?橧 I$I06wx<BNTF3{F2 =^ gtE$I$I$I Ih"t)$Incw. YM2ɜ\uDpЅ.A*kw2N "I$I$I$1!$lҥ\tEreI$I0yw8!„a6KAKs}i|KltE$I$I$IjC<$)`>wuWeH$IR (@~ͯ.b<.r0 2eO:A*u|Æ0q"}7ut$ITKPqn֮]KNNN|y饗x衇{8s~#<ĉXd %%%.{++8KY0z4\{- $'7Iy 7o> ^{-s{_}|/ܹ9sK/x$I$I$ITB$I IMM|%]I$Ij-Ϋʽ(FM67qp;US LfSK.r-<3L2~2uZL05k~gS-I+_{\nl̘1/}L2zSN%??ʕ$I$I$IR!DEѠK$It o>B"D3RNgΜɧ>)~aKuu5?yMJl~_pwr3tPFUW]ԩS B'nS7{ [nipաC[AEEE7oj߿?'dI$I$I$5@$I$IA<aD0DAFҷo_/OW^!33s=7N=@VVV3l0?{zE1V]{-Op0x0\qEjҿl Aj-߲e '\I$I$I$5H$I$Ij ()b:G?F1Gy 2xFֳ'yiLk0H̬Yя~Ç7ov[/B^yf=VJJ g}63g7`…Q9?\1?ȑ#yW-_h#G< %I$I$Iv$I$I:ml-"< l8󹛻a$#  T:,>Os?ӧ׺gҤIqWʕ+yGy0a3gdĉdffm6͛DŽ 1B3P\ S{A-~ロ+^z1i$,Ywx]$I$I$I`I$I$<{(ӟ븎|t` KlrmaYf}]ֺ/''_ 2l0N;4z!:> /2#F ==S]]Աu /))po_'? =C aȐ!Ck99„a6$z+R%71_'Nĉ[:$ӧ:7/IID7{UW]UW]ڕJ$I$I$B$I$Ivx7 x}B?n.$I$I$IR H$I$I0yw8!„a6KAKO/oC]$I$I$I` D$I$Ixd KN2d<<.r0 2%uL?1|+0x0|AW$I$I$I H$I$餷ͼ;AEʩf4r/"tvn 68t+$I$I$IRB$I$It>~]@Vd9. ].UCi| buVI$I$I$!$I$Ib kwxC"Lf3KTIj?B!?.N+_$I$I$I$I$IRT@A<k~.vEh(d@"X.l͛ H$I$I$!$I$Ij7(c9#xJK_.B&1a LI:͙EE0m̝ fCG+(+``k$I$I$I: $I$IR`w'4Xn6"D8sH")R%I1̟_eGy@$I$I$IR0"I$I6SEkX,e)G8F!0qtTW;|~ xqѣk$I$I$I:$I$IR* b{&Bb.7.S{  ̜vuI$I$I$ H$I$鄊@<(8L|Lf(C.St,.}uQ H$I$I$2!$I$I:.[R@,f!9CIA*I:^7I$I$IB$I$I:_c';h2P~vЏ~\<\% aH/Ij9VW_o~ˡ/ O(I$I$Iԉ$I$Iټy37of׮]׏L233֭[;-B"D26[ۼMy,a @7q>s!HF"Ԇ#$uS@$rk >|kȓOW d֭SXXN;AK$I$I$u\B$I$IROYYlٲRlRkYII [n >ΩʀׯYYYdffү_?-;_"WȍTP@(!B,aI@~.ƻ|1Lg:"g<)5IRGg y'p,] 6aV2)++2JJJ())cJJ 0ŠAbdee1`b$I$I$I輳$I$IR***(--R***T";;;Z˲߿?wnp֭#//2***GVVV}dffԖO1n.3!p$~_UɛߒGXA&L$&ы^B B3ϰkN~|LõDfcg_X]\}-ZDQQ?nc~8 2 -I$I$Ib D$I$5[eee˶lBuuu|Zp8رc0H׮]]OFF4Ywc:Tou' 0^ R,g9s=ۯvs9li&B,X8pzdk]s:*+&;<ݻ0puRc` %QX$v;|@$I$I$I$I$$wvdУ9ap8peFFF@;*--4mtݦB/֭kV襡#- 4Z+|I$QMuv ¿ ٷ$s'>bvJ>}=kv8p ={l4'0Zx X|96mb߾}muu%I$I$IO%$I$I^5՝옛0п]IKKkǡ869qHSJV%D!D6O$SHt(,,*M8 : A 2CTX4Q+(--e9r$MlC]GBm1TI$I$Iԉ$I$i{E충x ! 
4.]8Ž9W ]XfTTTPXX޽{npk?1oADUk(XAB1Y>:tm۶5())a׮]mRRRݻwÔ)SjgLzzz#k_sOZ^X@ݻwǷynPhdСt޽-)I$I$I:(!$I$eEEE>|8]׮]ӧOPG 5 0^z8B5*tgݺu|4#[HQHN" AI:B4t44rSN:$cA.uo7mT+Y7@[/hIRRR#뜺vڬ|c|7??ŋ7ּ$I$I$ H$I bWn*QZZΝ;km u---p8L8nr݊ 6l>f.`1TvG%d=`W&"Һb߿?ɭ?hI:I3vzЩ ##ltcǎYTP;OEEEIȱ}4 ZsԮUTT4#6AعAN:dddۢn#5kk׮%//BEu"5o bT$I$I@$I$WYYd'28|p|՘kNɩ7i~3d|cjN0M(((` '|7ɴYCMkNn(8diIm5ocvJ>}u0$M,''ujv9(((`lڴ}ŷzГ$I$I$Im$I$u(MM@y9r$] 5'p:]:6IMg&rǾ<Ա:tm۶%]~=;woS!'')S~5oK$I$INVD[$I"MMh4߮p8رc-ŮTޔj+k׮zh*8b@Mj\cbˊسgO|rssM4h]t pdUstغuT3,bG I$I$IRg I$IR M= oZkBtFF999&L;4iK, Yj^<''u+++~ncW-/**õѻw&#gϞ=dĂ>3IuԼ݀!+I5edd۬n#֮]K^^^$ Լ2dm1LI$I$II$I8; z{Z֜E8N8-&k}T9Ezvڪ5h1ucԯ1[K[u ~2o<.]Juu5gqַ{)--xG41iݺu 'P ҁ(--m0QZZʦMطo_|]=bA[XG?oh軳=:q}<[ݒn# u)kv9ҥ }M>^zSݒ$I$I$B$I${%&"ל' '5[*֚XYYIyy9?>/K8㌀+mؘ:jo~j,_g?Y|M:,>nV&5dddzb۶m ~_|rJKKٹsg}4"33:&EnkGtڕ>} ^&Ewh_=-ZZsAcƺ5 Va8I$I$I+m$D,d:Ӄ.E@(b…LIjzꩧ9sfe&eeeRUU.55ք& <Gx|BSO=?̇~HϞ=۸cw#JJJUV1gf͚Ŵia̜9ɓ'>.tӹ裏mGq?_o]vqwIMM%++nMַvZ.2N=TzW_Maaau-ZرcIMMeСqٳ: bP(onco&Z~z/pD ٳ'z*_~9ӟלת!y?-**+$==/۷{̚W=KXn]o"k֬atޝ=zp饗o6YO-o9kl=FkM&ӧlƳ~Mnn.SLa̙<< ,_s!Yl ,`L6p8LEEyyy̝;|+L4:^z1j(L׿>^|E/_NAAAWQQ?xꩧx׿Δ)S5j'f̘ܹsˣzw零׳dɒpCAҞ}:criLswyd"}m{NؒsU4D={vXj*JKKٿ?%%%ZE1gLBVV+SNeԨQ 0 =>n8OάY;w.?s{,:{3H$zgFP( FptرiӦEo}'-Z*ZRR zNS|}gOΟ??y͛O?ttO>zG-[Dm]`A4G]yx Z^^=p@tѱc6Y_Aulݺ5`1cn D|}eeen!z7hcZ-4iRtŊG_&# :f̘ [nVnذ!^s_D5v7x#gϞhaaaꫯtM>_۶m?uYM:޺>>%K틮]6:~zjkhys?%eKSwyg/o%%%]s̉~]zujɱ:avX_k3mڴرcp8\uȈyH$1cFt{, /D;yCS':FM61%߳uǑ6yc6uu"-{Sw9VZ*2$IM=gE#HկFEhso߾џg]vE{aâoV|:zwD7n=tPtW_}5:y$I$INCMCSL!B,damt<j%I'z3g>+++)++4~[QQQoٖ-[oJFFdeeo.8p ]vm1u&Po>޽{>9f͚UkG}> g?=Sk'Yt6mD޽lذp8h]3f`ԨQxgWpj~z+R7|41D5B!z-&Nجb?妛nG>,*Qm}?Ķ}嗹+>c."JJJz~_pu5iulIu]\z̘1Vk/Q=u7ZZ6*,,dҥ̛7 6rJj\eee_e:t(]׮]ӧOЫWGv׼]~=;woB޽kGpcۤd~~>/Iұt>a+y7ɉ߷}vN?t>c 7g]w_W^ᩧwIOO=zX$I$Ioj DZ@N$!؉ 8p;v4H4 ##рGej}MMܻw/񉝙{ 2z6lϧ6o̅^ȺuP\\ ygπp$$I$uNB$1@$%@'|²e())a֭ɗ7o}Z_~׏L233߿?׏,233ׯY IDATvZk MǨ׾ƚ5k8Sطo)));p>|.]Ԛ USZZ`׮]|W^a;?N[d#!"ڢ XpVvVթc+mXmKf jTCB%na pody=yqsn>߻C}fƌ׾F\\\uVGKcڼy3wy)--m4y.6փYז9Ǫl@6t`miߏ?I3npEKMM 唔PZZJyyy^cwjj*C !==~GS޽{yꩧذa6mbӦM#++H%##l222"--&tjo6#[K{u${{k;G]]eeelܸ6lPBqqqIOO !C/n9ԐI|p˃wq; 4:rss?ƏD;ёss73l0&NE]ą^h^$IC|_J$I:ロ_~3N:N8ȤҔTGܹs袋"_4 h hk6tK==Ö-[Xhw[oРAE3fpqQPP@VVVùHh,))ijcՒ>6[JKK;p@lrH݋ػwo }Ͷ;C<"G2JJJX~=ZիWS^^Nyy9Wn_LL >,guQyʕ̘1ϟ'9餓8S'> EYy8~=:χ_gubbb"!Ʀ***">%Kb PTTDQQQccc)((`„ U$ 4K.s/!===}UVs1mFII őeuO>$sitURRRHNN&33Ȳ!C0`G/-\`ԩ_~/vf7M۰aYYY 4+iӦ58Ygşgf͚hEmd?4zvsS둖 O=T}Zh_hq3gΌ{뭷+YvMOOc=6W_mZ<-Ӟ={Z<&7]WTT(tѫWqxڴiJ߾}Z&L`lٲY%%%[+W3ϰcǎ~Lrss^ЧOV37=\>#]|hcoomsk>1 i]v4Iaf͚9M7<7Fߟ2e _kۼFѣ>}:999B$I$I: H$IILL$33nprjKT׮]Kqq1eeeGk89Hff&!xUUUtRyJKKy߿dn)SЯ_?.BX`s/rdٳgsRWWǔ)SW^k?y| >۷3gΜ6'Ϟ=ɓ'ӷo_.bbb'?ImO9n&n233)**oov 믿θqx饗̆ 6#-3>P__ς 3gJX5hzEGG /6Z{[o/L>O/}čL6[n{wxbzm롵cԤIӡ͠dž صkW}Csss[=v+ExbmII ,\ˁB#Cֺyy0}Gwk۶mBMIM옔qDBjNnG yvmL6D?|zҥK{xg8ӹkЀB@^zrssC&M ]ven!tmΝZ`AhM6cW>NHHeff;c=jqUV9PRRR()))t9VZlw}74}PCIIIO>9O6&???tEs1o~󛡝;wYիC{n()))ԧOYgZfMd>>4|_򗃾éҺ!5kBguVO>йZvmXVC{W߾}C~EGGk=5K, M4)ԫWPvvv;hx6o?c{0=k}TUU6mZ|yh-; W ⋡> T]]cY|yhs%I,XBzkۯX"tJLL M2%K/>}zh^z$I$IQ)Ij(<.7ży]YIz_\s5A.{i mܸ~ 8#C aPXf w~aХk+VxY^^N]]]dv">>>vOUUUJ6l&OHiСCY$u-uhaR /99rssINNpT+,,h$)X}g>))) 2$$I$I7$ItILL$33-mM]vs.$**3gһwop=MUUUQQQqGK!D71$77㊂ӫW/rss=vHHKIҡk|:u>IRoaI$I:7{,I$I$11]MG}qذaDGG82IMN$I$3"I$I]XJmM'']][ dee,Iٳgw5 ᅯ}3}t0%@mwޕ{$I$I$S3"I$I=Dbb"涹mӫ7n: ضmy$--9\IjdѶl߾ѾMZ|KOO @=UFں~KK/u7 /ngw#I$I$IRg D$IL{PUUEEEEW/,,d۶mlذ]v5mG8)Kj+RxYVVhhӮHL4{Tvv6qqqP|>ݶm[HecYIIIdp"hQQw^n,TpYTTDeeedM'>]F}!m -u1`׳W-u/?#"4}^F$I$InĿK$It"Bt5qh<1 M _H hYky^sfȑ>$(iogm۶:ڵ[m۶Egdffo\?dsLcF|/z!=0(5 egg$I$I$If D$Iމvgk׶CkGй4zǽ=frss[|ܽ$uN嵻Ⱥx65 2O6l&OӰ@ӎuu)..P('ܡ+##YKI$I$I$gH$I$''v AkuQPPMرcGVp$##tP=MmiI9ٖ'xj4.bbw,8P 
??AKvDUUU#|߷ԙ:uj$I$I$I:UA$I.*!!L2339rݳg ,\ńRRR deeѿ=5JϢ"jkk#իZcС7J:lyy|Rm蜠aW]*AҢ"*++#4 1u$;;r?ap#Oî+I$I$Ie D$I 11D233; C-֮]Kqq1eeeGkzh)8'VUUQQQqGј"*^7v77iЖƟATTT@#$u%*s˟$2|0Q} ä:hHxxF222?6=i)RX:333!&&&qI$I$I@$I$Ij$11\rss]uu5[liSFJl߾Ѿ]mi$==CGɬYZZJ(jh)4n: ٸq#;w0<4,^ЧOZٴi;v%//Y-v$I$I$II$ItH#W?ЄQKKK)--M6Q^^ŋ)))aDKHH` 2T 55y햕QRRByy9B{L89#4233IMM%>>Y?q۩y H{(uq 2IA֦6RQQAqq17n 6PRR¦M(((Y83-- "?#++ 7R\\6"FEEE 9nt)))GΑ$I$I$I B$I$IQקOF#6O-//r֭[ǒ%KU\;YIMM?RSS<+7K,ͩU\81baݮ$穧yx |~ #.%%N8V,ixxW?lX?rHX$##l҈I$I$I$@$I$ITRSSIMM=Qχ+;0`*SYrJ&0;Y$I]S%<akY$&1y\%=T\\YYY. -};(I$I$I. :$I$I,xK˒$uRr M6䛜ʩf5p0$I$I$II$IK, b3[A%ID}Z1ͷ2 :JV]$La gp5/PH!Wp1]$I$I$I1!$I$IG1Ld"stI2)L^E^e1-$I$I$IC$I$I:RI9o-.=$IG"q:s*F!$I$I$IRG2"I$ItEL>,e)cR]$0Lz|#I$I$I$$I$I:J&3x9Lfs ,IAxw˙'L 4I$I$I$pB$I$I 9o-.b-$ImfDNmf( $I$I$I4 H$I$eQDq37Ea zeIZ- c21,aIeIRMd&Hoz&o2 fpХI$I$I$Im2"I$I9DN `6>$f3c9,/E,DN 4I$I$I$ H$I$hXBn~O֠˒nE^d{!?d5 .K$I$I$I:hB$I$IEEf5'qYtYԭTPW*gs6s<{̛7/ҥK꫹Y~=_=3f7׏~?8s?3StYtma p|WJ:Ac$I$I$IRI$IԥÇg<殻o<&55yVd&a A$IGhF5Plf{,$I$I$I3I$Iԥ]}}ӟwߍ|or4 /hס27yL\e6$<ʣLa y䱂L`B%)`%I$I$I1"I$IaÆ5[̶m"_l4JKKfy/}y'xyGT),I:h_' `@e/I$I$II$IԥEG덴4˛///'==hlf cx.Im [89?g>;c$I$I$I>M$I$u{cǎ駟n~;6:HFLcqE 5A%I|G,a ri% ~I$I$IԳ]$I$I|?0m4^|EsݻsF_;~ٜu\JVO0!A&I?2ʩ?BJ%~I$I$Is!D$I$u{r =w999p}_]^L *` cxg.Iy+ _93 _$I$I$v$I$I]V(j.".袣]R)>u\7ws7q],D~>g6.I~I$I$IC$I$IR҇>8(r*A%?1?a6 "I$I$I$!$I$I=Lfo=<3A$ yc 2_$I$I$I$K1"I$IC}OeLg:pE5A%(38^\ $I$I$I$1"I$Iԃ%\8(r*A%+)LaX26$I$I$I.@$I$IL$N1$uC2Q˼1tI$I$I$IRe D$I$I,c_\bT]nRL%^"̠K$I$I$I4!$I$IE/0_k1&1u ,I]6p"dD$I$I$I:B H$I$,N$3?$uQE1)d]$I$I$I-$I$IR8+U7$u!d&DaI$I$I$2"I$IVs#)|tYl!D $I$I$I$[1"I$I6Mg:XF=e,tI:* gyA $I$I$I$1"I$Iv^uj>L'$u23b,9]$I$I$I-$I$IRsÓ<<$&>]N|,if4.G$I$I$Ib.@$I$Mw_j.M븺z `,c\ʠ˒C~Ϙ|N㴠QWUZ ? u p-ޱI$I$I$uB$I$IRw}P^11Z忾$!(^n&>yxMK_+f3s3=rԕ_<ݤy__uupB$I$I$IR&$I$I*hs (0x,dNf k.KR{7k: uu'BfGk:ط$I$I$IRe D$I$u~W^ &;N>cQ3q+XA?i><tI:Vr9s*~t9`ƌ}Z_m%I$I$Iz(!$I$;oqqpN X9k|qW1|A%(K{b "uW^`-ٷ$I$I$IRf D$I$u W]kj+:(XN)1լ,IGɷ6XƟt9NFkÇèQW$I$I$IR'd D$I$u _BW ԧ`ȎGY> VL2#<tI;~126r͘r4.;I$I$IN@$I$IWj>.`j e( 7q_d&tY?nF>.GՕWBmm55p_$I$I$IR'c D$I$u3gBLLuN be63ax.KaxΠQw c6FEq$I$I$I=I$Iu\y%h0 $\JV2L`stIw.į]aИ}$I$I$Id D$I$u!p) o.#l^en&nFf2,Iax?#QO5eW$I$I$IR'b D$I$u-3fPOXbl_y8V*$v%g,WqU娧HMO$&8ӃJ$I$I$S0"I$Iu|fDQu9Sr3AL`stIuNU}0x0IIIݛ~4hPzXblN4*1yc4.M~[5\W*q^娃׳cvEee%ܹ;v{n*++ٵk۷o' m6vEmm-{졪*++#{n[}_wyr2z"11ݻwM>}K  ,K>}ӧ_~V$I$I$)hB$I$I! 6PTT|GP\ZJŮ]1(:@z'" z1@&? x55;̝?[bb8:Tkkkt11 0CKVv6Æ #++zu#3|,Jd";Ŭ˒-]t): uVnJEEfEEE[neΝ|ǭf^&bbb"ˤ$0`{| y >>o?+pҒJjC*;w.<ݽ{77o.ap%{-!ӆSSS2$I$I$/$I$MeeewwVfƍTMv\Y $ H/ &iv} T[@9[hV6\qqJ ߧR#ؑ#99c1bw\Zն! %^vnFnU^Qe.M )Q _:;wqF)..,(--r B߿?|0tH!III?jkkٵk;v젲ݻwG޽;v4 oGB7 ;DEEJjj*NFF 2T"AI$I$I1"I$I"Xb+V`ea!+ )ol,#bbQ]B!@z}=dP6ͻo;WX>hF ƘsIcРA4/f39/&0yc c.Mq|ILr.">H7G>w}5RSS#!!C0~xRSS ==Q8\$''7hr0jkk#[6 lڴrV^Mii):$''СC6lXCCZZڑ$I$I$I H$Iԃڵ_ŋS˼ڳب(gLu5 |Ȩ{xS+wo׳r:V|?$IҘ4y2LĩʨQۆ39xL.b.KQa )M$֊ywyՍwY~=UUUDGG {СC#'|:p@'55mCPK8?ի1c=6ҭlĈdddI$I$IN@$I$I=Hee%ϧॗXv-u|"!I\ 18!" vJ0btEʁee,Z&&939#:::tNq;-+c<]UPȉA-[ozjyH}F&_z ><"++G%**48Ė_slܸ1yyXh=v<:c98=z4'<ߐ$I$I$1"I$IR7f{9_%K夸8Ψ濁I@Ꮳ"8kuuk={X.UVj*~mz-֬YLLL {,':u*]w]d~zzzХ'77_RR4^Λ7ѻwoFѣ5jT$(#$I$I$IGI$I+W2o<7ߴAqqLPK쑢\ Au5oy.O]uӧO'555آ;)La%+L .n 4yGxKߠx7Yl˖-cҥ|G 4ѣG3i$:Fȑ#իWU; ##38={f͚H@iժUF0xf#%Ep ߿?uG{,X3fptDDkР9s&ϧ[n$%%QNnΝtDI$I$IN;H$IT mذ]ˆᳬ,Ju\azbذa b04Zъ4҂%k[ʫM8A9,Y 7@6m7o>,+W'>>>xie˖ X~s璐UW]ea$I$I$I'!$I$3K,vX:kggs :$;&xmssD>}<)v+Ρ3I"= :T,ʫdŭtjڵu]4oޜ3a͛NJwDEޟJ(IIIaڵmۖ[n5kO$I$IςI$IŋsAvTgΦ KQd *SA" e;طo):yq.Rֲ6XR=ȋ-Dt歷ޢyL8ӻwS2v([՞=ArJ~e/g֬YL:MK$I$I͂I$I͛7s%pVdgS${ׂca(Nt9)ټ{$%%$X!B f0ӘR@Әt,xrBdee1`~;~Hxx)˰x>hwpuUi-gϞǼ!  
ȠW^DEEQfMnF6lP`߃\ 0`a{\xy߃{w[>kAm/˗` G,X׹98SN͗om۶ŪUM:޽;T\.?PU-[pҠAʗ/OZ߿?_~ekh౗.]5\CjTի}9VI$I$IcA$I$I3#FpaXA=Zc}_9OQQ fo..~,ZBY$=m#{ Oz+7o/ X{P%et!{cF)x b0I#,”c1x`}׍5;p/^o~^֭[Ǜo=|7͔)Sزe ˗/g5Jy*Tr./T|䭷"!!q2 0;3իWW_cԖ=a7|37nd̙lܸ{9.{_vE۶m%KHϞ=Yl?#7x#zbŊ sn6NΝꫯ\r^q>T›oזMӦMꫯܹa}Ӈx_~3g}vzq`7|3UVeƌlٲI&dڷov]=qwrj*&Mt8ڶmy$I$I$IE=o%$I b<㹞냎@(b\ד$(99C:e233Z2/eeq)p>6gs-04ƻ-o1Xy@M-|n)DsjHF<>9(:7ǫGXUoӯ0 ŋSF ǹ>'N$999o憨(V\IʕOqdiNsэQ8_Ze˖'d+ (wߥW-^]rBmi~6m.pF0h 6oرc9sxwW_}tFYnWsSqrssU.>\r > C%-- [DqEz#ΐ.n)_M;GE9GEEO?׶|r4hp1 {^g}v"wcѢE̝;7(:̙@6mN"I:|7S$I$pϛ:C$I$I[`x =h)jd:\ch?S>_oEϰe2AN99|5cF1TME^q'DVrU 4FMϞ=QӧO窫קSN}n޼9!K,t大smNNׯէ;ժUc㿓|g$''M2>}^q|LJ00B!z'|ʕ+ٲe \r ˗g{ ~)ݺu֤IEꪫ7I\\\(o׮>In6k~駠H$I$ITbX"I$IR~bpㆿ3pjq%cT;s9ͽcaZb[x\o,Xw> d әOD+Zc@&L_듑Aƍ ByKtttǍ/gMf͸[8q }xY%kFs@B+Ǜ6mZmOjʕ+wR_0`gy&y߇w[TT)}Æ ԯ_?< ˏ?X}v., dرn`ܸq\~G͵~zbbb u ꭷޢ]vu]DGGөS'FIVVV/X >yf$I$I$I!$I$nq; 4 $17a:J.㠾Á9;gۀbhqІ6e.]\NIsJ3p5/0qDbccɽrʬZ|KNewa͌7:0|p pJGnv&oҟE޶8^իW+RtMDFF2m4233c\{<h"ߺ6nx<77;Žӽ{wKvoA߾}k0a6mb̙tcV^P(Į]4KK*UxYt)˖-㮻bܸqyTm%I$I$IY"I$IR6mJ(렃`:{gv'<ct-Bn)0/p0-GsgZ)zWU2ǫ_+$G*dx/,KVӘF˹|ȇl-ZдiSʢE:t(>(6l`|'\qSN3+V͚5kxԩ);o@{S:Ǵ}qN˖-Έ#Xr1SIPNf̘AVV}gu;\CԬYm۶ױ88CAmEOJ*q]wgoߞ(꼂UPoX"P(oٯ~ی7 RV-}QFE^8se ;ޫW/"""ˏk-[믓DLL W^y%]w]SO=Eٲe9󈍍^;e*T_LʕСkwkEqdۺu+{N:O)$I'C*Aǐ$I$IE :$$!Bg_"I$IR1kc2-"D \Ay衇SbӘ*VъV6oO<*V$1"}Cx89_}EӦMtG?f3 lĤ#IoM,$tSo߾,^aÆLÆ o֬Yt4TYz5M xǸ&IK,kYK9AG$I$IE`A$I$IPI9 qq9`OСT)U@n/f͚A:m593ɥ\ʕ\`EVб#OF7Brʔ+W%KpRn]g}FnnnRnn.~)_=ge,[aÆQR#JԡduAG$I$IE`A$I$IT|||8w 6+ݺu#==4nFʗ/tLIt BV2$$I$(,$I$hٲ%f"X٢˜t0r?/S e?0爊 :^Ռf|ɗ\5'nvK Аԧ~Qs=^zkGѶm[{96mJ-xᇙ:u*v :Tڵ>?ӬY3FEYf /sNQ%IRTը !$I$0H$IT\~kޞ0e͛sp^D;j9 pVDh߰o^#...xb3$w+E\rKS>݂Q,7V\ԩSڵ+o]t!::=zO2w\tdڳgs+^z)tڕs%ʪUx饗K:$I:Fu VC$I$!$I$0P93c ⯻""83| 9O=k֬!<<-[ҡCڷoOiܸ1P(Qxbf͚ŬY1c , ;;ZjѾ}{?ӭ[7Zlu-Ii%-y׃!I$I'$I$I: q<<,\zqO-_N͈geHVX| L Pm99thۖo߾ԨQ#:)HF@t34ATd&uK-jD B{{w}Z9s0g_vITT7yiӆxZjELLLG_~`…̙3oy~z"""8sԩ{ou+IJ`v" :$I$I* B$I$I:ORRs!%%Gy(\ΥwH4ZL>>,*TK.W\UW]EZ>}hG;zZӚь: :JY̢=탎qZ]6kצgϞdee1oykBf1! zg (R6h*f3?Ǡc mۖmk駟OOOgĉIT hF:AG$I$IdA$I$ITڵظq#_~%sy/yٓKp)Cݻi rCP6w֏}LGD}VY{P\9Z4kF֪[sp-Ғq$Ӈ>Aij ִ: Pvmj׮}H]Xt)˖-㧟~fzbsYgQfMjժEZQuf͚,''uֱvZVZźuXr%k׮eլY5kְb 233(S ԯ_urEQn]֭KÆ iРʕƿI. :$I$I*$ B$I$Iу=zm۶瓞w} I]VfOn.eB!Ί,ݜ p%kv]:}ˊ}Oeʰ""aCVʖq4nтMϹK&M|#1 0y T:N3_%-hAE*EEP\97oN \{n222 E222XnVbΜ9YիWcǎm¨QիWzDGG-111DGGk^:QQQDEE.Qnʯƍٸq#6l[ IDAT.o+Rvmj֬Ill,[Fyu3Ϥlٲ$I*m.by6T :$I$I: B$I$IaUT . . _]Xx1K,aŊ|9.[ƊUXeK^21Ĕ)CLn.1YY P*Q@}rUD;[đm_}l6`%,aa .'y  VL u嬆 zYԫWƍӸqc֭K(T]T<#\̭B2AGid8󂎡lٲ4lؐ ֭[YjU(,V ===_-p?UV%22J*QR%UFٲeV*UjժTT"##)[,˗B TV ʕ++Zjӈ*UT`m۶@@l޼9sNNNqmڴ)dggsN233ٽ{7۷o'++m۶i&oζmضm7o@UT+_\Ӳe˼Xj׮7sM$)H |AOzF$I$!$I$ʕ+G-hѢEw?-W$mݺY+33}@!RB*/OTQF11PfMbbb83W<n<#dnc4W1(4iB&M ?;;;[ne֭ "m… 2e Z"22-[믿_cvuAT\9*VWFʕRJ^K͚5 _*UDdd$UV%**ʕ+i%<_H/h'T B$I$I*$I$I:*TPProٲ;vs|oo}0~愽oX"+VjժyTS<@_2IQGX:5~I FԨQ}ygk=z4G7Ö}3x/f֭n{+ByR eʔɛD$ILgRI :$I$I* B$I$IRAjըVZQ@#I{~O;1t4P X`ANLƌyBGݮlٲyիW?1%Itt /"YO 1AǑ$I$IGP&$I$IOoz5_EXI%C 5tptؑ&O̐!C U "IR.ĠH$I D$I$I*fQA FncYBZ2*RSSIHH ++ٳgt$I$dHw3 AG$I$IGaA$I$IT E_ )Ж,`AбT,`C%Xrr2tڕ4₎$ISc>f3"I$II$I$E/1*TØ#Xb,*233߿? 
bذa;ȠcI$IOBĤH$I# D$I$I*R?g;ۃbl3&0(*a222ر#)))LcCƶc)@?3t S$&&ҵkW҈ :$I;wx:$I$I:!$I$Ii pH#>c>&I:Ԧv1Tdffҿ İa;v,Aǒ$IR1חFE$I$ĂI$I$4ҍn|WԠs>#t$bNvZ|222ر#)))La낎#I$I`A$I$It93>~o–cY* B'55={6AG$IR s;Sʾt@$IbƂI$I$4FI$1Fh<KjVPZ'QqLbb"]v%--#I$*Oyyg3#I$I D$I$I:u+Sztos-Vr#蠣(@ߟA1l0ƎKdddб$IT]WJ$ITX"I$I$jPLfC񍮧լ :AǎIIIa 2PA$Iǧ2#d#XŪH$I$,$I$IJ0H"̤Ŭc$_I͠c( $$$ٳILL :$IN#r/10E$I$aA$I$ITt Gp13AG Dt  99DvJZZqqqAG$Ii_+8$I$jH$I$IXg:3y47s38$I$I D$I$I* &1'y⿸Ơc9CH鐜Lbb"]v%--#I$yg)CH$ITjY"I$I$!3)L+5cld!LϠA6lcǎ%222X$I*PxWy(8$I$JH$I$IӉNcӉN$KE;IE'AFF;v$%%ɓ'3dBPб$IT'r-6G$IRǂI:>|KOO3$;;TڵkG_>J[nQFTX*UHJJI?I$I C #ccWDGGsWc$I$ hlfӒt3I$=Aa!N!333h رc :$It>a[#I$IRaA$0 t,!GgϞԨQӧsUWөS'>7oޜ￟+W$I$bH!1R.e-k/q/##;ɓ2dP(X$Ia=sԣ}CYAǑ$I$T D0aׯ ''_|;OFF7& -Ѭ^:ϸq_8i֬r 'N$77$I$I"`FKYJ LcZбt!KMM%!!,fϞMbbbБ$I*Oy2, H$IT*X"I(&&k_~'KvT\UVo;͛7n:t` 0$I$IGӖ|״=]BIaOбB,)ILLk׮t$I$КӜx&218$I$,0x`^z%5jsO]ta\r{ 8ɓ'3~Y$IV*[`8K.v{̫%333h رc :$ITdr+ȍ|G$IӚ!tZhAӦM:t(-Ϸ~С<裼klذ۷'pWԩcƌaŊdggf~i:utG$IB b0I#,'czdNxITtؑ&O̐!CBAǒ$Isb(a !|)!' o.'C0y4AѠH$ITT :$UTAG$I$\Cf2$Y"^E :ZCc 88/_N˖-$I$Wr%MD 1 ehБ$I$I8I$I$I0eF;1AG:!p())DzҥK-H$3 g8oFq$I$I,H$I$I:fLRH=Mo~o!'XVr&33!C0b&NȌ3Yfб$I@r-r)2AǑ$I$«tI$I$I[/ћ g83AcҩBr%" : /--.T͛GbbbБ$I@'D0d&t,I$I*,W$I$It\ f0KY6Ўv[AGt W ^rr2;v$++˗[$Ia .a3$I$IRe!D$I$IqӞBOzr1QdtJ dĖDbb"=z`ҥl2H$IR"#`/I$I J$I$I$XjS̠ MVBH233>|8/?cǎ% K$I*Bx bC$.X$I$U(B$I$I$b0HG2vy7U,񗖖E]Djj*#111H$IRwR: c=t,I$I*H$I$I:q|˹bePjdpKrr2;v$++˗[$IJ>a"k&;H$I$UB$I$I$x4lְ&XRmj]'9q$%%H=Xt)-[ :$IT!]5+\EfwБ$I$I*,H$I$I*3,g79 :RSZ!33!C0b&NȌ3Yfб$I /}yY2zЃl :$I$I嚅I$I$I!?}(F}AǪ0\!HKKK.̝;y1n8BPб$IJXR2,_+H$I$[U I$I$INuHlf#x,Z2hBv3$Wrr2$..˗ӲKI(==ٳgSZFV3ݟ?gA?? :$I$IݻK.2X$I$IT. f0?' d iTr1\A "pc$))o0uTj֬t$I$#ׯSNt!('NnQ[m1":$I$I~@ !,I$I$:@F1} :V"DMjZ)c 2#F0qDf̘aD$I:"AdH"8l:$I$I内I$I$IZu3}8`TBP֭[]e$IA"I$I$jC}0˸빞 :qb$3l`!r-s>iOs!\KNNcǎdee|r$I$us6lHժUiܸ1_|1 ,k>gn&5jD*{qɒ% 4x_>zGsup_3Itrsss!II@E@(b֬Y IİaÂ!I*x -,NԠ#-iw|WQDq dWrr2$..^{-[I*:tpI*U/"p ըƓP(kF~=%3IND7uI$I$IRTKtcu0*,FKJJ"11=ztR $IR%v%W_pїK*UB~ߑYEQn]}YV9J IDATyԩ{??̺uh޼9 .dΝ{_*gy3fФI^~e6oݻYlgu}<@sJIIwf͚n:zRϣ4c-0sLƏϺuطo[n7ߤW^aP|عsg{ͼyؾ};cݺudN>dZ&Mo̜9s2e ]vVZt֍ɓ'ԩSx饗KhРիW?y^+S1|Rϣ4cKҬY3-[ƣ>?϶m۸ ?~;twjʓO>iF5[_aw*I`r J",f1%JP^tKJJbذaAǐ$I5\Cq$t2u*EVcDp>39& ^ZZ]tK$&&IN)))t!$T:~I$N :$[Pߊ/"W]u5믿C/A^:l޼ ͛iذ!իWgDGGw^222[n޸ իo5kdDFF{Е1طo_Xs*ٳT(롖-[ƕW^ɚ5k=_{ C߶m1115k_~ypϙ$I'AQכB$I$IB?1s'3~Ƌt2u;Mv""DCrr2;v$++˗[$IN`QD1Qb\Moz)M*.6mڰi&xzrrrؿ?W\*j SN|,]~/Zj[o1jԨR2}Gnn.Ç/`Ϟ=k׮$I:,H$I$I:a4XHF2A f0]бď1]B=E``%%%H=Xt)-[ :$Ir MxxohG;1t҃&IZdd$wu?pcڴi͝;y,^8߸&$|?ϼUA y0SiQ7 R :ub̘1̞=+W#}g}J=Ҍ8={6rs9 [l6o_W|cGI6m |_ٶm۷og 6Ι$I F(4XI!BbtUPYf1p'I:TRRRo8J$4ҸX &3_cMQ/%38>u^8Uۻw/ ^/ 2$ﹴ4."RSSy饗HLL .$:tpI*xxx!IR`Bq%=\c}~޽$&&XMnݺ1|V֭[i׮֭7梋.W_-o)S!ΩkgʕԫWT(XkiӦ` 6~}:#<رc <>x`ۅ{$I:B$I$I$ҔdX3 ;{~ k+'D۸-/bٲe<{ԩSs-om۶ cC=Ě5k!|$''ӱcGX|eI$IaNuK.Bn&~yIRrWr)\jxyi۶-T^mC+Fԯ_Eѷo_j֬Izկ~ ,X_L&Mz$$$0zh>0}tJLL 5k֤_~,Z(Pyf,? 7@͉_<|:t(-[$** ЩS'~i|ɰQѣG#pꩧRZ5Zjń xgI`B$+,B$B$IR2! dNe^4Nlf+" b.8i"xn|Yt)}7[ng?c,\mۖ]ҽ{wtRn!/$!!&ԫWn0uTj֬pRI *.i-kyg9S.bB$UFŭ6"I҉<RY$I$I* ˹3эR!"Ρ! N_vv6+Wd,Yo} #a=89}ٜyy{Ν;Y` ,`ʔ)߿VZѳgOzI=_~@\~_{,'' Əĉ7n\@$I$U&h<-xsўI$IR`!D$I$IhF3I2.^~q19%ŋc5j9NsΆs @Wv,L:D{9-ZTHKK;$I$Unq3νK#~%\BAG$I$,H$I$I!6, ?sYd&o2Y\ʥ-SI &Hiׂ֭[=ztgggOо}{zLH$I::)0sWqw0q\5DtDI$I B$I$I$M6p ] rPs=]J,&q,DFͮ] rnmrYg%I$ yNwAnZ&5(IR*8I, !$I$It]rY%fØÜ29ny*${K稨(x-H$I:~4^Q~oxnnc$I$B$I$I$DE6Rx!,M^ER"@Jr "{eذaDDDS(QgݻL(I$IEiIK &0'y'xGyA b4!? :$I$"I$I$I5_&o2,g9DKnUCBpҕxoe* "< Ibb"C _~TZQ$I׀=cCg S{{ z~̏)I$I,H$I$IRQAn;w'>ד1(  e˖-|'4l0oI$IN4 1'yxtd(C]5D$ItD,H$I$IR Yre+\B:t@-C ">`+gAD$IREp'1-H6nn}0~/ϕ$IX$I$IWq㫸D,H$I:ЁyGxy'DNT 24 :$I$"I$I$g "I$2Cb33x{9KA N :$I$)B$I$I$Ux@. 
"$I*xyX2f3x L 28₎*I$I:N,H$I$Ip,X$ITDnd&ws7r+=e\F?Qv$IB$I$Ir "$I*TL2ylnf1v.R.0踒$I2f!D$I$IRcDI$IE49p{'x0 L ADБU233IOOgÆ ~{&Mw߸qcbcc8$Ia!D$I$IR, "I$"Mm.9p!y5^w47CzӛZ:c}W4!uS3A{+wlذ7um۶ۮaÆ4jԈx>#6m7|o\zqu4ҤIW~,I$)|B$I$I$w@TYX$ITEn ao/թιKK~Iqg:0ϱBg222: OOO'777o|3<37nL||8G`q$ŕtG_`IUGj,$Iґn%I$I$t|X$ITEYnwr'K0MցhF3Lc?GƍIKK#+++o|EB qqqQT^VZѪUbǕKJJ 6l`)v#$IHVI$I$ITȡsssLR:sU_Y; L2drrr8Sk؂$IJU"`?B[rqIdq=֮]KvV$w~J|8Q൵j*6l@zzz!)4mڔc=UI$HB$I$I$Urss,gscu_mۖsg_2rHҸ8ǚI$I"Id~O x;#/qI&ԭ[OGőLmVd))e87oNZT%It"I$I$*{D*i۶-m۶eڴitܹBBgAD$IR*Ɋx(7D(dȍ̅C!26eWq Zhڴ)u9_` ({e֭%G6m 쿸UG|K$,H$I$I' رoabbb8s1b?OYv-7x#.5j 11ɓ'O?eر,^.]sgg1f-ZDdd$=z`ɅfO7n-s<Ys9sCBll,v8x.} :Su͚5l2o_~Illlc:Lٻwo+ "$IFI+$OߒN\@7T^DLV 3$ -Zrz(GԊq̠g*jժUٷo[l)'##7ܹsK\!{Wȑ$IAB$I$IJHɮjڷoOݺuYj7t?O -K([n_$##QFqm%oW_}E޽0aӦM/Od…nիWӻwoƏϴiӈ୷.+pD޼ͣ_~$''_~뭷=׏~~oqqM7͹sY.)k-O:t(?ۿ?mڴ+-Z(t'Ns%̂$I$={yQᅏC=!!!5>AF-h߆Q;3oڵkINNfʔ)p)9Ȋ+,H*S)))%^'I߿TQV8~Æ l߾=v= o֬UYRyR\q}ZZYYYyDGGY9>..}P$HB$% !T8 !$H^<իw殻gϞ% m6bbbۻw/իWgyҼyIMMMSNyZԸt.jM޽;V*u^S(NVɓ'_Op3b?l޼9 `̘1;Ie %UT~R)z[o6oUR~W h޼9NұVTqЕ֮]ˮ]V+8KDDD$I*?C!$I$ITT0aR={6&L`ȑl޼v1`FATTTZRbÖ-[ -űe˖֭[iѢEW^bn(=22@pyfnvz- +C pWG}tRSS9 5k4ceRv,L:Ղ$It^}EهߧsμmHUVtܹEڍ5! W\믿fɒ%%?XF|&It|X$I$I! I'ĤI4i[lwg-c4hЀ7(Vlܸ ^`5[UV(Jp3h ~dVZ9ޯ˓O>Ν;3sLo?e$I>4򶉎ηGQE?_ұRzuZjEVNqdر#v111Ů6Ҹqc5kV{%I'%I$I$@P(DZZ4hЀK/"W8={W_e.ի:F7w)s= ҿ|/^QFGIpl2fΜIݺu޽;ꫯgΝ|Dž)luς$IN4eQHHHgϞ.;n?~hׯO0`W\qUV=.YD(*繃*?,Ec}L(h9cyw})SLNNzee IDAT*7x#W_}u})ƍIII[!PG6m֪$I' !$I$IR,]wiݺ5۷ogG{{ԩS} 3g&Mb…yƏOnݨ]6}!22 ۷/999tޝUy',{ԩcǎ;qƬ]'}&MӱcG{=JZZe կ~éQF;wL(bɒ%GdAD$IѾ}زeKEE]vZ󶋎wqr hXx$rss Czz:?ygϫʩp98>N:)ٕիӲeKZlɥ^JRRz?TNӧOm۶mۖiӦѹs^ #6mb_\yYfԮ]xLW$ V$8 bB̚5}=Iҡ6lX1$I:*%@:wlD*c/2s:pxAdŊDJ.%%؋$-z9&&؋w7nL||\uU|7P(ĴiJ=ߢ=t=o%wtޝs[oœO>T.[o/`ܹ%MFFFlܸkגM8őƍ$IR,HRd!DeB$B$""P(Fc=t "RՒ**={X`ƍ ѴiSYVR!ᆪQFڵ :QF7i$VXƵmۖcϲrJ&O @ڵKzƤҪU./~>l֯_X9 sWлwo _|iVfpOQ+Bݺu+r΅ <&##֭[uR7yy]vAѱcnM5\skvZ2e |r)EW)Mtt4111%G|XJB$U@BT,HR,H# tY*>/TQp=֯_ώ;mNѣYfTR%J*ڵؼBHll,~!͛77.55N:zi&wΪUիw殻gϞzTٶm[O!**=nGѸqR )j_(ᆪFsH[|\wy]fB!7o΀3f fW8n:ۗMőX"""$I*Iy($I$IT5kp׳tR;7nLnxGڵ+mڴ :$UZk+~l۶EpB.\ԩSя~ĝw%\d\IJon`͚5lذt233V4i҄FqiѵkWiԨ4lؐF8 ƪU&e˖B/(c˖-yߺu+-Z(tիWٳg3aF͛i׮ `ĈGˡ%psf˖-4h2hOiW2A͛7s[oNNNN:p{=nQn {X|֬YSbbbHHH(vܦMoXn6mʻ_~=~)'=={mMll,7E٬\ dƌÎ;hԨs;w3$ ͈#HJJ :J߿?c:^:͛7f̘1lP(tTjoÆ 'JNΝ;Y` ,`Ŋ߿VZѳgONzA*ITԩS_|1c{aƍlذλٱcGbbbqE7kSO=;ʕ+ٳ'*G7^[se…?߸ŋ3j(>#҈A\z$&&B¡⽖ps[n,X=rJ.rVZUqcccYv-zjc-*0.SԹ8e˖1sL֭Xa#}=p{=n^x5jTy#>v*-IFFF/zn:ۗMtt4111yq$&&166?[$ϟ%I$I0TR:СCƍW rZ رc9sqD x+gk.+̙~3fΜI^W^t҅7}d<G|ptܙP(Ē%KJTpiiideemsx#!!={,..%IR Z R bB̚5}=Iҡ6lX1$I* "/ r2dR!Zܹs;O1˳]2}t5ktJHϞ=-H\JJ :t8$߿LmVŶ6l ===GZj+_/Zjԯ_3` UV`ߌ;ŋߗ~a~_r`9gëUI@[lĥۂVQomT^1TuobH*ǟ `%.`#aIB~ IH$'1sf2I 0g>6wygz-}Q-ZΝ;ر#]v&LUVG?{l233Yv-;wT3R,ŋ3f 999r)1cVx͛7ٳgw=XߘV)ﵨY̿яu޸q#~;o۷oO{+8_XQXl2ƌ;CDDGgϞu|Kdd$_E*SXj}gNcLLL#$IA]oj!DU% !T6 !H͘멊Y/TWvlqdÆ n&ƏG,X~ϗ_Y뮿znV?m6222ygNh=֊+袋?~<>,5b#fEz-oE. 
&ꫯr`w5\C޽Yx1mڴa5s9'x|ee;TIKKcܸq3;<֮]{Bﱮj(3O~RBH ٽ{79s᷿-3f --}{3yd>C^xZjW_}E۶m|$$$l:}*rwXO3ucʼi&RRR| gt$/Wv6o̶mێzUWH||<111Nj[MLLQеkWZh$IRCRf"IBI*IRCdAmۖ 6иqREDD{n7o^ZٺukzOfJ}x=c{ѥKoٲvURYftܙN?/_~_?={p=JTTTx-kmذ|c|ټys7d@$U %URm۶b׳zjDGGSay$..KW[aɣwze$I Rm($I$IEbb"dff*7΂HvYE@eq^6mZnX=7oLlll}rwٳg3anf6mD^я~Ĉ#*f񌧼-[ڵk5k3GEEcǎ TT̴"Iz%&&zyVYlP踋#DFFVP%\)z]#~u$IttMJ$I$I,TNV?FDD~6m^u֪WڵkGnnnY0rss8ׯ/UX~=ڵ ~I'1yd&O͛y뭷2e -⥗^qj<ڵc奊5Gl|N:cQX$I*X#w?b+WիٹsgxMҶm #:tQF=TIHek֬믿ӤIN>ßݺt$I`!D$I$I,Ofǎ% ˪UѣGx;S+BFFFoF'55_~n?`tԉvqWVHRcU .`\~%/Z >cʼcǎSx"I$UHEvvqq˛}ċY/Z]v9|vdY$Ij' !$I$IRdA駟Κ5k]w#Y7nu\r%DDDꫯ2yd,XPb믿~ݻ}vNZ4бcG/^LRRo6?Xzug\r%0x`4i… yǎ95k8ӏ9G]`D$I5kFBB Gݮn:o^b6OΝ"6)qի ] V^#..^7$Ij(W$I$IT4Ԃ}rrrիWc*~,,,,?LFF}aϞ=wyÿNYOtc?!!y1fFMdd$瞫TycG̛7;[n3oIR)D0 cXQTDDD0sL I7}tӃ!IRrdAdѢE _puו![l_|1_~eQDmϠAxܹs`WET UNN'c/5Tc֭G(2ő.Jҥ -[ ptR:p7o>jk|W;QcǎiӦM$I7uI$I$3viӇ_|*85."")S0|p7oΧ~ȑ#曃v\jx^|EL@$I$ ыg(ٲeKG6lK}]~ꩧҺut\*/~?11џI$0 !$I$IRT "&M⦛njo<#s=DFFңGF5\tR;.W$I 4mڴRőfHo3$Vr!G$I5&J*A30UT𒤆n餧CȂȢELAD**ZTBQI%ռm۶"׳j*8Sb{*#OѣSO=ƍ8:I$&w3H$I$I*> "e"I$!&&={uGgYz5yyy}KDzFrxѣuֱ=c; >|܆{Ϟ=K|{K;ڑL2̹KS8bI$Y$I$It,HU$I$UGV>(.Y:ϑ#g#ҥ 5%m۶^;w9@rrr׼Czseb6,"ҟD$ITgX$I$It,HcD$Ijf͚@BBQLqY+rFܹ3QQu2fc9qgVXbe`="!f1L9MoRH!T0&4 $IRU$I$ITkYX$I*#ׯ'''' 77-[د}tЁN:f6lƍKlw'.u#>>f͚UP$Z3(*EӘXҒ˹ZzӛH"N.IB$I$IYQ]aD$IT5kFn֭Q۷oYE֮]ˆ 87KС;v @bcca $~ho "!B<ȃe,h9cAD$I5B$I$IZǂj $I ::]ҵkגO?EUgjV$2S8s8RH%>!_I$jX$I$ITYQM"I$IS>%lBD2g I&R,H$X$I$ITTTwspn. X$I$I!HzZI>#"l3l'XӟTRI& :$I !$I$IpAd62̓ݻէ\[K:,H$I 4 2!b c#RH%48ӂ.IZB$I$I/{%K 9ϧej*@D,4@$I$IRmtxA$Lc>&th[ .t :$Ij !$I$IB3>.r96 Hffz$I$I:jA$ H&R L砣K$)@B$I$I$=|!\~9лww RX$I$Iё%&Dьf?I TRI&8Nt :$IjI$I$IuGVs,\t)s_ւHaD$I$5DhFʡ%Lwy,&gyDRIe0iGK$Y$I$ITnь oUT?{߫Y=,H$I$֜nvńE y(|Ӗ'$IRU"I$I$CNx ~7 #Ft:* "'Z)˙L&\e]U$I$I-DB"iLc,cKDRI7mV$Y$I$IToqǠwox]NUoY .d}޽x Ĺˊ+X`׿w{߿?m۶=Lq]tW0%I$ITG%@) 2pN ҇>DplJ$54B$I$I$UBx˃Q ;դƍLrr2wuWnݺ1h &LP%#?\uo~}I$I$]G+Lbc)9C )D$Ia!D$I$IR?ᦛKh&TtA\ɕyƏHOfmt+pKQ$I$I*с e(OXV 2٤F[ڒBJ Aǖ$IuJ$I$I Q^^Q C7Tj`Є{Z$I$I&pAd+XZ9zғ DHc< -IꈨH$I$Ia_~ CgsUWH p%W2yEБ$I$IjO1!Bd5_G)J*iq]$U3H$I$IŚ50p deoZQN:$r37SHaq$I$Iz!(I$L2-la)K ml# H "_Uб%IQX$I$I%K ) aI-H1?ǠH$I$Ig';KDF3tp3լ:$I:I$I${4>kנI',DIvng;ۃ#I$I${GDE,"tֳь3F7nyֲ6ؒ$5hB$I$I׿kѣWUIU~B~/"I$I$58iN )% "\JVs~N': "[B$I$IT=CI龈 կ~E׮]iڴ)~:>h/5w\=\ZhA-8syjr4Rba"C>hvܹ;=zмysN:$V5$I$yHRUhNsRIe#<̙3,YkѣG__0rHjTkR8r`ʔ)e>7m4nF̭ƍ5k? 
,\C~~>P;v_iӪzx$I$Ij-hA*<dU CWr%iOOz "tGIRmQXXXtIK"`&3ư`̙ ϓ$n餧Ci.яw!-"""{J׹Xd _~9iii1v>(o6_g1OC~︞&//N;7x={oٲݻgѾ}{Yg3gӧOUV]֭[W$ 111$tl%8^tٞ+TvxСC>$zыdI!98)$,5yM!D$I$I+rsa?aѢr ŮR|C )%\>)Lfn&L6m7n̈#Jݥ逸bȐ!ow}K.6 ?ookVH$I$I $մV _+M6Wp's2I$1e._uбGIRme!D$I$I >={`bի]vZj]LL ۶m aÆUС'YJqh]U6l\T)((w#G ozjN?t"""m۶emf̘M޽;_u]ǜ9spmI$IR]"IA;S?gI&DKDA$D :v%II$I$34ڵ,(DFV|Clܸ7{AӊV΃8x㍢RHӧs-W_O>Uz,D]ɕ f0#Ien?N^^ӦMcԨQ%/7o^O:Q8;߆^-N$*{COzҟ<߻w/m۶?cǎ$$ 111$tl%(~/z֬`sHR XJB"y5%-9sI=7'x/u?J7uI$I$68<w RӜLE^m.ۙ8q"ÇXI$I$I tyYjVybxH"6 C9xL$"I$I$~ \g ͚H.r.bF1<;#uؑ<'$I$I,fM`1 D "<(I I$I$ X.wN$zS™o-QXX$I$IH: )d"D&2=H2ɤBA%II$I$)h6AZn -ZHэe+D#I$I$I"%t (#>",fcB"۹$8RH!T|5蒤B$I$IoXv :Tg\f4_Dt$I$I$IHQH&% "!Bd>(\ȅts%I\d$I$I`b7뿂N#ySJ.N 26F3.tp3լ:$"I$I$մ^x^|N# r*ws7s?_Eq$I$I$5 GDE,"tֳQ3F7nyֲ6ؒzB$I$IT^~&O!CN#+q]-tI$I$I XsBJ|s5WӉNlf-Aǖ$AQA$I$I?`HN{~ 1 ҕ$I$Iൠf1 ",#vm%II$I$&_\tJF28:$I$I$pdAdXBx""RI%4&䒤&2$I$IRw/\vl fAid&<AG$I$I %lg; a9pWЎv$Df6;tlIR-;ϒ$I$IRu*,+ahݻK,O&\U|oI$IkWHYre߬aÊn^"IZѪ "M6!BLcDҋ^J2 d iprIRM"I$I$U)S`[H (F1QɛYD&2&$I$>< IDATgШDF+,,z9A((N)I:fp C-BE!BLbhY.B4'$UȠH$I$I{ر_AF41#Dx<$Nt1f3;舒$I$U!&W_QyAtVtӞ e*SYRֱ?'Id.sI#V"$2!/ؒj`!D$I$I[•WBj*qGikL."XI$IT+Ii%IF, e(OXV 2YF[ڒBJ Aǖ$U !$I$IRU+,+sA .ved%PH!KXp:I$IO~|^^6z-pAd%+YZ9zғDHc< GCTkN$I$IT~k7f̀vN#589tӝ+RH>%6`J$IT]򟏍#IDKVyxgJD&2,#/ؒJ"I$I$U axxA/4R4ȍfwoZF_$I$IfpUФI4ÝVD g8O_+Xd&G(_ C9tlIR۽$I$ITUl.F:`]ȅ@QkFU8 Ij,H$I$IUa1|ڴ :ў4q|_$I$IW_ ppő$mGD>~ůhF3&21\02 "ZŷÜj$?B$I$Iy3p\=\tQi$rg1s6gӈF~\Y!A*r:/2&IK,H$I$I'^6mN# Miʳ<gpә^%N :$I$ITg-[ĉA'TIp>q_%?aiٰa]\֭[|Æ Y7ҦM:t@ǎ ?LJ∎zȒ$Iy|[Ձ>G^x$[mY"l KHqYB 3RRh C;@;LItaiK)KҖd m$%@$lb[ny=p/ZYw9Js ڊůQ<|z-h4`2`4QTT$+0U$", ip|]|ȕ bC y0q$''G{/dK40+?C# J7Ht9DDD؏EoUXc36ߏ>=߁@@?DAaLyj)t@zz:vCp"X< O388nU@OO†KV<~www7h``===@HCNN ^GVVVԿa03221C %j`& _֮Mvif-ьGss3\.WdžEc^łSt _L(R.5QG-˾}b) fY*X,c^oDDDDDD4qH<O]QZZK*IV:neo\.ZZZ088Vr30 F3r500_BzV:5'04 hPXXү<++kBL wp@yyKC4 z54QUU(++Cvvvk9r*005#%%%HIIM ;a>Oѻ#χnh  p'h뽮{ sJ dh&Svxz!bz^֠C1l)քjKaO#Q S _$Fvv6F300 DޮI?8qBOy466rN. 'ODoo<~VVJKKQ\\ ٌ˗Gah4B'fDTPPwttv*B#R?FSS188(Og2`Za``ZQZZ &1U12DDDDDDDDDj?PX|. ј z 8477FU (#ADDDDD3_p|5=xx^ +!<=XV%4BMhkk>~?|>$I$BDC}}}=HnGiiл9ь\vi1BvI"s:Ů]ԤؾbΜ93gr^O~+GDDDDDDDDFk+_K)JMУ]]]4Zr#ݎ*E$֎&["F3p\aw^Ulj]U"""""R!_M#NCAAt={_aX`XTOπ ISS:;;Ö+4:cf&;vLz>ZZZEn|rv!th`6a6cӉF444ۧ/o0 9S`&!DDDDDDDDDjgA]~̐GCCi6тcII RRRX;p8'y5^p`ddDN 7 },++CvvdThƐ5r҂A ۃ=5t7h4I-P{IpO9т$R$~?,H o@4}HÎ9"?rLvqX~9ӐlRTT",[,Pl]]|ݿ? pCKBӄGCCiB;ŗ#t:ZZZ c?c555عs'NjIXی:zU{{bEHf3DQ!p 8p@s碢֭íފ b郈f"$ߡpYg /ʕ+qκ!DDDDDDDDD<pg'$@ AІ9RhְYYYIMGHwQmhh@wwN48=Fwikkk_سg?4,_W^y%V\+V:1$1Kq饗SO=jr@䢋.Š+6#3vDDDDDDDDDc=$$$NBҝWcc"Kmp$*48ԄNyV°Hh/$lDHDDDts=xx^ + \~~fV+T[")x@;Z~>CCCyDv $p٣sNݻ˗/\UUU M!eeeشi6mhjjݻ{n<3O~|]n:dgg'㏁""""""""h{/9%b5].ө zc0`ac&8f?""" v zyݎyyK.]&m.o7SzX,X,D)tPWWf8N-3Vh$t{w^/oߎ~?PPP. O=֬YdTlkq>#Kx饗qF/dž p饗B&EFcK2 zĻClk;k4s^BqX~=6l0/$k4!"mm]L2"eŃ>={`xx÷-\wua0ߏZU=$EzdIDDD4Y~`J3v=j^ ĉ0x3˗/m݆/o.)WƩ<}Q~|_ŏ~#l57e (A xb (2BhJx[Lvi&Egč/8/7gJCXy{xw;% y&wSimB:K>#TUUv򒥿q# 7mF{,--ENNNkGSTt5[_= z~8Nttt(ndBZZZj|X'z4/ߟb%l&lp:7PǷl"6n(f9|09ƍŖ-[o8|p:ɮHGcGJuR[Hzg#~^j{@t:y|F7w\yUUU-^Q'N.ŰeωUw!nݪWZJ,ZHs}t> .wNvq%qnCrM/8޾2^Lt_jG*{c 5eMtٯeXK+?jȣf=Kn۶Mfi&qu׍o z?Pʟ'|IqyىfwĺuTlx$60fBj"+Ѩ n4*oF&qP~~Xh.nݪh4GdWyFy$#1##Xh<y?_m] }//_.~a!>#߿8S{ァG2{=|rŴ###SOn2ybIR3[ {JZ6bl6EUV)ž[n;vP=]1{DJJ馛D___3*_W7}L;q,c>fjGF+{$j˚|%_>ee6+|IuTVV֘_rFގ^BNj7XxxgDKKK̺4 s΍:>cߚ؈Ay^9 FQHrI3ۯ>yÞKÚ000^7JҾ0ڰbNX}HG<2x<2R?.(n#j!$Ȳ&Og$xyXnoߎ:K1N"˨į~+,[ /?)^{5Ŵ<bcsaύF#ӓTp83w}w3j;g}6qWj*Kt?4jc'"j j&zdG;PSDIxSRH4 ~_W {׿uBIkfF>/}K %޲o&h4ؽ{7V\ p\ZaMMMp8x<1:ttt`B{ɼ)!DD b !DD1BDDIwMo^O@mmmq###tjeee2StTFIIܐoCsss<.BV^^?ĪU6.eh4hkkC~~UcM?&R>5_!7E"s0ͣ^LѠׯǭފe&8@Vӡ ~)))I@MWnsxiXh? 
zXV{i< 瑑02':ω$2X{g9u]]VZ{ ^xa8,g??zn:|ĺu–!&qMsχ!<D; ,믿G[o5#ttt_Ɵ'Xz5z!PFs]m,D%MV_qXG&RGeMt?%xnŊJh9㭗X[ZZPXX8.u x3FAggRSSÆ:?Eᥗ^e]s\B!Bh<1BD!DDt W\w߸oDfYge˖aٲe(..fdVײo8t -- ===t;᧧}*1 w}7^x|>~X~=oƽh?W[H=?+v+_m=Ԯ7ˍ˲#Mn4uy^x<455o6:?䢋. -?BhJdsOq222`wѢE0(,,DQQ{,HGƛGu4|*G&Rmm`9@XmYǎʕ+ka|F͟?xQ]]6N"|Xp!vQ]]#G@Qkxx---hiiAkk+|>jkkq17;LHN?tTWWO6mBVVx cv,hdj&zd"eW[D~m<)c)K"u ]G\Vގ^b/ㅱޱ>k:?>:q1Ǜ źf8v xqa0rvt:vw8ojjs=NbXV9L/`iضmJyQQq)(knnFQQbڸw<x---xװuV?uDm9#),,DKKKܻmEh߯-loOnǃN8p>J h4d2)L&F9o0;<2:G7#kOO֯__=t]wa޼y />{0³>Gb˖-3uc׫8ΉtzcG=vwPZZ'NL)33?%KƲ Q;XL~%d_T[D%s2ֲDֆŰfƄ{;zSxz; xL˅dEB/^TUF#Css ~?""Jz/J:ߏ[p\hii ^G~~><0F #y9^ydK<upgWI1.|+ݻw+.=܃.飯/qHa###yzqGYY9眈&6-k6l7|sKZ9JѠ>l,X!jU?N6Uy=2zR[D}XɫSG^:$ro?%ˎT4Lzk׮MvQDDb{A3};ODDmۖ"laV%1>Hbǎb֭o7np,(~ŢEĪUƍ.n*v!x GdWoZ '3<#.bQQQ!>|ZGGx^c *N8!W__/,Y"~߉)^z%aZ?X5kÇE n[?W]uUܲl6o?YlVD{'Ν+v%šCDeex衇b.S!6m$/F%^yf͚qծh"rf]׋~q iӦQ7v}(--?zUl߾],[lLx;DUUu@_ZJ,ZH*UZVfp8DuuuPG*߿_߿? "JT~ )/^|EO[;Sl޼YTWW*h"vl-UUUZl޼Yyb֭'/ؿp:bpp0-xx,xьu%^k]CK,g$}Yqꩧ -Kt@ۿ[e9ϽK 999a z^qpW-[(v%>,N<(k좌b oN#/.bqwe?vZXP}}2pUu=2hImYXkO 6eEKKD}}}f"Oiwxꩧb%ֺHdF{3 [oUg}V4y Z{扈&!4!"""J!dd\)m۶MpH h4#]]]ɮ^EjleXĺu/K"N{K/TdffLq饗{/l>@_^L|rs)yWŕW^) Vsr[;v9sV+N=T /+Z}-g4oz^{W2}>ذa0"33S| a]?օHd={<qFa2V/۷ou}C"PYÇڵkEffWGռfs8bjkkΎ U zHQmۦh:PlSA5hL Oyb˖-}lQ3l">9@"'#՗3GNDj<{y,u4LӅ3BөO>9e SN9E\JkiiiҾt&ʓb``@lذA[o5,5_ũ*Z0 3>Q~(i?"2D\6\qx_&zR[Dv]O>%xGWYYY"33S]V֎z9ۉǟR}EuuΎZX"qH='c۩ĉbŊBՊ~ZtS!DDlv\kLFú#"yl޼9 "裏SO|8dfp\Q=FFFz=, fsDz2dgg'vDDhnnFcc#iz=JJJQGKBDٲ~~?4 cWG{c"gǎعs'~$(N___kFhs|ž4h4"===I~n@FFnNKvh¶4y|>?0o<<83TO?7M%Mii'$JbbļX900 kjjsNű TU$!"b x %{9 vHü^&8['gj0[o> w}7 lذk׮e&t29n7v؁z o6,Y[뮃^OvF"""""""""g&3EgBj#aw/#<E{J.˲QQ*޵~ y\(jl|BDDDM:FI$Qn]]~?L5iXqq1RSSǻDDtk*ٳ <Y< a0]q@8`G?h )@b.SCO! @+$XWW{]]]r DڠVMjl"mc***¶=6 Z6#"""  ,K^*%j&8V$ldLD^0=\=6 w~^;w{-܂<Xr%V\38׋Nݻ{={PSS!,X]t^\p3؀"""""""""ؿHO,IvI ]}ӂ-xᕃmhBW|ґ6`*QzcƣNj#N4Zq{)++CZ/Mw U75AR'fDDDD$5VKjs]]+nD+4|wƞ={p{rrrpCee%FcND4ĉ8pߏo8x h"\r V\dw쑈}dd/9@oE+ѯ( & b#yTČGp5556!(P IDATDɡ&?0"""Y"w;Gk -݌bxxX1Hw$l͝ '|o% |RC xov{thd,Y7t"۶mCCCr@f95 "qq8p<.UUU۰rJLd9i?,XRDԇ>G3K#07y衇aF>aX1,~) $ b<. nB|08B4~=BJ*#}v;U, y'R""""R-8@V>Ν;Ԅ;yII RRRƻD4mW~*kVJQQZmjK44 ***PQQo҂ȍx y XdҥXh,X  333 "Rn7=cǎȑ#8x :hZ,Y6l@ee%.] NbO WOb0|71{ w &PO! qNiXib[txǃy/ᣅGʐ=%T~?%!KCCit: =G6#"""B $j|$;Zh4fDI&0& Sz``h|aXz.b\|򰡡! ={CWWO% .ܹs9s`QRR288'OuuuC}}=N8cǎ!V+,XJlܸv-Z(1BDDDDDDDD@H?[q4IѳG=Q.90 #0rL Lfɮ-M:NUpd``---{Lܹ'OL88bZ7եFMУAѸ,=p8 YGDDDDD AZz%m^WWߏ&tvv-SMhDV\\:ф>+sυtHɄ46#0|̟??rHرc裏o>$ljً*/I`BKKWK@ 2=\K'09^z ?v]7Q<%ՅOOOGii)v;V+/_=c fhF3'pÍOp 'z+OT4M8%}JVXa 6Pbh4I ***b/8܌FEC^GaGkkk<\.WXC-5AR$vDDDDD4  ,KsOIhHAڄꕄ祳R0!%1dZT["p8"W;cc#\.t:;}/RXVXV%%%(..hlfa|>x^nx<|>yt:؈fdʕ+nfceS_""""""""ħxMhB3рE4:`XPb,2\Kp&?5&DSh# )w^444[Fӡ np)))]--nvC!O'5 iS^^$֎f T-uuu9k{{bz4Vh$xdb#)OUtC1}pHzGCqq1RSST[""u222xb,^88p:hhh#MMMp\UL`2hbh ^XXD,n>O{x^pϧjZL&XVx+bAYYtI!%g,DDDDDDDD4C .4Aݣa #XPRXa98X`6`&+ɓ)of8Rp$q/# ÂϺgpA0Nii)ӓX;"""""؂{!tP=DD H~^ÊyD!"Zf*I72H|>T#VЇh6C^^n?=H᡻XK;O =GmnnV#Ry- o50BDDDDDDDD}hF3>qxހ /r;0Ì T`Vǩ8/].jKD0 1StwРCDz)GX?Rc:ԟh"H n -PWW;w #B6Li ]_77xI繡fA&DD3ڞ|z|hkkCkkֆ6ףFh$55Enn.򐛛/xXNN{2B@gggأ8mmmeI233QPPBfaٲe(((PO걇ٍѨ bP$Nʽ|?B<~! 
QB,jF)JQ2%(A*t'f$9&"S2=b6p8;{H!"""""hjJ!jjjBD I1|6^r0f5|2}`{L& &SbwuuɁ98"5oooGgg'^/>CEHRRR| ya0^ B#33999Bvv6z=tf @E @{{;ׇv@ꒇE s@zzZϗ.((9sPShC[M鎁""""""""h#p(?.0aZ9Q28PwVoii;ؿ<<4@RPPш"Xh4d2^Q<L(]TTłO?]~9Ճ=\}=_Cw&""ldggSNz`ب?F___X]2t@ZZh˃FAvv6ҐN'R( /xHϣ(Eܻ$f~&33cڃ+o4v=>ȐC˗/W\!!LX}hz0 llQ6"8ş>z dhDyyypIQQ;QMY;P𔈈H\梴t\'ى>o)1<<,4pB7jjj\"fXhA;N?xM Q KM]p_\-5mlLm, uWތ)+i+"傆EP~+.>Cs8:XY\\\(V[=w"s" !""""""""X: NC f)}$($>?˖56"3Zgiа!ǏԳ'^^^QZ)jlll#$''[g [G򇳳ڕ<3B#5Tٗȣ>_7Cƽ#%6,YÇ?~|w4$|gNNN!ED !""""""""-n XK9n98H4D%=>S; >dB4CGũP*EjժAժggyyɾ`/{HG d%+ O-""RPYrU}ȑ#t=O'"O !""""""""DIDa{GH!7"/ը?=Q~HAFДb}ct,nܸat yD*#׹Qr#0GK*O(FRRA`2DDDDDDDDDPp‰owy8 #X"""r٬BH!BHR&~! ?Qt;F/\^S*(d&/|=FG~!bCD !""""""""OPIDaKS?|:OS|Nu̘_4$')DDDDDDDDD ! ozЃ4%""R988hB@ T8AqK" R F5P TE\rfyZъl#O}ְ*T1:Hf6X,ddd`2JJ\:yr?qt)A jPxnt21:ի*4KY3F)2 !$"(TB'8le791nsgxU^%PG.5Ы̚7o}m."*tX:2:Hg6K4CHBx1İm|~zE^$RO<*A0cƽ7.H@M[ZK!Ujt,B-sBD 6BDDDDDDD%lg;FjS^-hD#̘*QF ٟmT%""""""""RU";Ig:Ӑ|74ѱDDD B,IDQ"""""""" NMY@sGiHCB ͨK]7:&_pfn݂}%""""""""RĕКf6}ct,BI3*HB ;IbG8#ԧ>*hDcSFGwof!󈈈)0 /9NaF)tf3&INqd=nqE^5O}lV<.k/2;; J0.`Dao&g8=bd2Qxq,QD运EDDDDDDQ;mi\Ғ10&MA>f~#xfYNylRH+"`|e&3E/phxYc=!#<=IZ$===t"""""""""reۨA ~0:H7!""""""""R3\B JVҖɁ۷osrJ7|2$''sM_΍7X,\~7orUnݺ˗ ))jxS9ꊍM߳ddɒ쌽=fpttggglmm)Y$%J5˽dɒ,YPSDDDDDDDDr‰լhIKF7cH*|F<[b5ω oywO<0:(11~8< \x/f+|\rn+D₝NNN/^%Jl#K"䄝]mqxe\N %iii\v[nqUTRRRHJJ">>RSSviii\zZn\\\%WWW\\\pwwݝҥKSlYJ.MҥTR)S,u^c8fbt y*H\_q;ZъU! R8ϯӧ9{,gΜRx" eY7Pti)[,>lY1\]]qssޠ#[7oRL)/_|A빿xbme9O[˗[[=(""""""""S11xͫJ,LfѱDDD "R_ɑ`*S̉'zu>>>xzzRfM:w'ʕLJ˫䑏鉧gg^Ν#>>sTTV_%==ŋEjը^:>>>ֻ766`]DDDDDDDD7Fozss,`fFf3.]2:<}r/"""""""d V!#?Ҁ,bL1O͛9r(8~8111Đ IDAT@%ח͛k닏ʕS٣sssN:ԩSϧs9Nf)Ky&t_@&rmN:šCyqӱJ*<쳴k}???ʕ+gt|lmm)_<˗'(((gϞRa͚5|'aggGʕQ$RF *VEDDDDDDDDjF36!`ֱ<%""o"R""""""""6XxscsKmjMq"##d߾}>|ׯc2ߟj֬IթR vvvFGB ///6meyZZGRP5kqqqdddHթSPJDDDDDDDDD䱪A ִ&@[3:Hd6X,FGBHY'JWT5:ZvYkcϞ=ݻdUgȐ!Szu,ӻwod9tb޽̙3ł / G!""""""""7Mhz<KDD$ !" !"""""""EPi|׼G/z<˳FG+r2228t[l'22gbccCժU GY?qvv&00@벴4Zܾ}///իG&MFL&@DDDDDDDD "<^Eь! %""fBD 8BDDDDDDDd! D~Wzы?)Ge˖-lٲ[@RhҤ #F ^xggg<vvvԩS:u0tP^ʾ}سgwf„ /t4k֌ yYDDDDDDDD$gqf-kCZъ,5%""o888`X!"@" 4f2yD 2gxhEYv-[l#>>ggg4iرc "qqq!(( n߾婱cǒLr&((SLH~f=X`ӉN,`]jt,|A3p*bde˻kXRrFG+;ʕ+Yb{l6ӰaCFApp0uVo͈܋ kצv%=={ZU:&00Ν;Bʕ-""""""""P11YBOz@KDDpfYN:)ve;F7ֱJT2:V͒%KXf 㩧]v5mdtD֖@yqYpƌ:t{4h@ ӘFIJ2A\*oѱDDD @ZZnݢXbFODEDDDDDD #=h@Se1UyB;ƸqFYfl۶ .0|w2c@ǎ/8s [n%$$5kиqcʗ/oǍ*""""""""Ha|ȇ0 F1$"4CH!qLd"Etѱ d,Yٳٱc˗'44nݺot<"Xb4mڔMYd cʔ)4lؐW^yE,`,cqy`2$""b 7ptt48< """""""R]a| رW^yr1|py[N:ń }$##SO==QQQYƚL''/QИL&΋~j֬?XnʕcС+WAk.#2>aøm#BnܸapyX*P P |g|CB1и}6WA4jԈ={{qikZjMxe„ ,Z/gϴiX~}/ErO mڴaѢE??ӠAԩIOO7:%^b˘яt^-3X,*V,";g09 _hƭ[7o~~~tܙrʱk.;v,FG̵3f0gj֬ldɒh"O\ɒ%2dcǎTPRjUϟϭ[(""""""""D'ֲ+]/ĊHѡBD >BDDDDDDD =hIK*Qf2)II*K.FYt)FG{$)))f[^Z,CL&럙PVaÆf5jYɓ'yqss.>&'r Fٌ' 22~P^(ڷo3δiӆ MvprrՕ.]믿7睢h׮5Ov<ӧO3eʔo߾\tۏõkײ,qA,_GҸqc ?˗/7: `ֱ׌$""'T)T)RH!0jPdXj*PhJTTٓ:upfΜIJXtؑqƑtq同 }̙CBB߿?޽DFqm :ѣGs9֭[}MNۿJ,ɮ]r ֭#&&z7&&-[ұcGbcco߾p̙\@P͜9w}___իG +W۹s6%J0m4|}}:wޡAm6˺n޼ٳgST)jժW_}Oqaaa5^{:o߾ܼy>ӧ*gXX_yWl/==]v=0gXXGβ~hh(W^e„ |WY66mć~HppOzz:`Æ /_0#F`Ĉ~ׯONذaTX1E{午gϞٖoߞM6:Mܹs]t!"""&MdyM|||տ ߼9Z dϞ='P~}?nt,CU|O"48gt$'&BD .BDDDDDDD\fIMH`;ۙ|PhκuWvvv >v+zbaX9rdMHH 44~[[[L&&K(X2tK,! 
(,,-|Xݻwa$[nEee%ϟwyB!1TM>w9Up8Vh65\0LSUTTHw pTz2Eԧ_UhӦ  O8'J>;;;#<<\:E"n݊'O*Om QTT  6FYG;0M kI&AOO( 4Al@ӴھR>} ,,LG>}\[ok̙PJʜpei\Vie IDATuN{صkڶm+cjj lٲCV h5Gdd$VZX mWOkwQI규4mUqpp@HHL' >^EDm 'QM]Puϟ?x%v؁yIGhh(ЫW/ٳV8O֭ѡC/45\}Fmǁp)S.N2Oƅ mYj ?^\ Զõϩvj~(n޼<ݛ ZZZ3g8rCCxN<@ }v,^X扤 N:޽{|P7xҚB/::4|U M)Bth5\0LS;֭[Mz #I*Q4K}_Uo3gb׮] J2?.^EI-Z]v!??_xܿ#Gۺ1۷V²Tk*9d(((P!>|(L2(jCD(K(R QLE.Q`JB_dbbBL̤Ç)ݸq߽{\*Ե~|NNN={);; ),,,--ɓ}\:׮]K">,U~aѡkrZfer8997%''SYYǓL}ԡCڿ?effRNNS^Z {: "[[[zYU̙3ݻi*|+))!WWWjժ|!">tBUUU|GQ?PzzRwLJVMyyyԪU+ZnQf(''ZnM9oXXq}>"""H ow!-Ңt va1z@я?HZZZ*n.uO"`.csmKŔ)" .COOڷoOcǎPiii޿ON椧Gb)!!Ajk׮7ڒ. Bݻ7رC#مܹSm:uJm1 6eouv\OOο5iz|SZԦMZf Q :eƺߴ!LIH7f4233#777z%qYx%R6mѣGk㛡!߿_mFqsi.jӧj!rrrhdbb"w9èKLL Y[[ihdjjJW\;D["EeiRwϏVM˗/֭[s0 qQ0èAll,є)Svh H@{i/94\0c@ѫ@޽mge1H[[Νhmz{{H$Ҩs.44LzzzdmmM ,97U=!ӌ3ೖ-[ĉѣGo8Wd~EOC٬u 6KHH<ݻGcƌ!CCC244$'''wL;wѣٙΝ;yܻw\\\$ȴSWdRer빖.]4` dooOC'HD#FtuYAye\k~~>-XlllHOO,,,h̙t ""뼾zj[z=3˗>S $mmmJMM;Zy8+7'tyߵ45 TUDoܹs2e ^xw$iJJJ…  Aǎ$^|w ajAxqFxzzRu+/^\]]1uT3lݺ8 lDEEaذaܹ38w Ba6`[M=W0 T޽_||||0}ta8y&6n܈ S_~%vhm޽5jh6e&L̙3ׯ#77͓^^^Ezz:PRR%KHM|HJJBbb"0uTNy_ "۷O:ZA^/,Y49sF=ǏGRRRRR &ӧ0e|Q\\ gggN !!AHLLĤI쌄Nyb1agg"- QUU[[[ܹsbXz|=ۆ 8Er\?3ѫW/cŊ8x RSSѭ[7,^Xnƺv9s&Zjk׮gΜABB :ږ%3J@@1vXܹ820aoߞ(jUZZ }}}c0LV}/i"* _ŕ+WԔzE>;4I/ٓZnM7M6j}/{Z)(.B{200 WWW*((PYv9z뭷H__-ZD|GbSNѨQАZlI}ih޽ԡC200իWSyy9߱'PH|GQt@Rw}Թsg_믫_|Afffs0oM6Q-hƌgMWUUEOTTTw$:MIti1-5GS9W0 zCHsQvԔvڥ&&C ,;vI&&&diiI.\%GUU}$hʔ)욑rssT;wZ:Zyizn˱ct QQQ8{,V\ @w4[lA.]wKKKkoAau !l@{7oDmb1"##qFرݻwGhh(߱F8qݻwǮ]#22|Ǫ.;i0h t@HSy!%%;w͛7ѻwoyyy|GdTTT̙3ѡCl޼HNNƟPlٲ%&O,u) h׮4KKK(֯_CCC|g|Ga NNNĉ1k, >w;h|_~͛7zjhiiMm_p+c<`мa122¦MGGG̝;XrM üߢcǎ7oƌxlܸQ#x/F.]p ///XYYA[[ΛMLL>T- &ye(2QebI H033S͚j[v!;;s^;$%%!99cƌipS4[h!1u?~ܹsajjaÆa2nosGټ4e=75ؿ?:w?3gݻ'''ɕXdd;7W(** 3eBc௿NVژ?>bcc1tPw8tb0 aaa8p &M={"&&~~~Mⱉ 0ka+JT7>}/@6m`oo۷ɓ'|d O?Ǝر?Ɔ `iiwL>DGG֭[|GQΝ;#>>J>Ҵ<{ ?-[VTQSSS#** ݻ7F01yV\z ?WJ=9kp #8-4s 0򬭭O?ѣGXp!+++9Vy7Z2 СC?~<ٳg#!!wFǎ(7nD|| ]t͛ٵnDz*JKKADJߨmffgϞɔ|~UWeKL5JJJ$ӵnZ㲙A 4iӦ  Omo6Peuc˖-HLLDRRΝ L6M9z4L^^~3n:^m6ݝ(jW}ͭ]v<'a0M# <}wCpMѹsgر|c^aԩ\]]ammaaa;B=0zd#7p(o߾@jj*aiioְúup}c2 LeggƄ кukXd xZѽ{w߿(*ѹsgCiBRSSyN(bÆ 066Q7X޽qUŋп;v o`s3f@ǎ~,[ III?~mX(B m#1 0<ҥK`cٲeСo èы/pqm۶OGJvӡCc֩cǎسgbcc1fZ VVVm6iHZ bگ舓'Oʔ_xQ+A ʬe>NNN|L+WN0bDDDHMs]t]u'NA3gĮ] OOOJչ */433Ô)Spi\pAf޺9zj?e%yի1~x<|??VXX={`;Uaoax5PQ)!**aА( $.))H$Q˖-Ã~w;èUee%]r|||e˖OGѤIVwaTŖlK˗tɓ'Ӷm(**0LTXXHtR۷/hтmFiii|GTM61Ei6m"+++zUU޽{F5_W4 sN0[nikkSvϏ\w,i4999@C !ԫW/ /^ܡ;dJHTJ|aabO 7I(mذ݇0Dbb"ɓȈZhAC m۶QVVVPP@۷o.]zwhÆ S5*'''d*++x$yv剉ԡCڿ?effRNNS^j+ʊ"##Ο?/9Hdj+L={ǏSvv6RXXYZZɓ'%EGGSN(<<ݻԧOڽ{7uGw^ȠLڷoYZZR||<Ԕ/_^[W{Vt{44e\+rrr*--gϞї_~I2mԵιS5/寭MZ~=uޝP׮]B)dÆ dddDyyy|Gi۷o'333c04'N TVV&;6 i>*;;mF#ԡCZlaT޽{tR"dggG۷o٘7___>|g71LͧԃzIk׮ڵkёD"vѴiߟ>޽{N@@ӧOgڴidggs1ܑ+W0x`"""kKkBzz:C aÆ XB8BX AAAL>I&vD!nիW9|0aaa8p*L’%KXp!S;b;I0oU;BfnZ!aaaqQqqqߟenUN@9{,Fb=^MM }_5^̙ѣݻ1vž}ضmQz w>}w^Ξ=,X9st./_J8bܸq;7EvQ7ndh4mׯ,D{ +Q3G47_ĉ~n݊13f{a̙٩Wdddp!Ç)++cСuV(((P; &g?q^hN>Mtt4?#[lsss|}}=z41gggttt~B;˅ ddabbrb,_Yfqn+;v] B:7?1MxwbٲejGx{{n:z-~GGHHׯg̚5{iӦ멪ĉ8p 3g?$((S59N nvcڑBt1WW^M~~>ǎ#""'Ok.jjjd1~xFJKK!22'NI^^}aL9r={'o0p@fΜɜ9s>}:jGSիQQQ_O>D8=|;,eÇ#sׯgݢoK=fXq啔p]t]u|I:: ˱QvݕcZ;N\\ypwwW-_G޽￟r ۡۑ8,ĝy!U;JSWWǥKVvJLL}}}7^^^ʵ K\\\묬,lllZt=z4666*\quue…oS;y衇(..5:bwq=P\\ܮJ2^3iii 2-[|rqG #44CQ[[{j=Tyy9'OTv= bĉUuG9JALa !Ї[BtgCoܹsIIIZhU|N( ohllZ* f$''ʾ}8~8555Ek ===y>=WǵkaرJGenꫯ+W`ffvq)ƎKll,^^^j]tK/Ddd$'!kArTVVɱc駟8uUUU2i$ƍ/#G_~jHEE111DGG+?CCCƎˤI8q"ƍo'=׷s1rv3!>q/rK fqrx"T000///<==quuU.=y-!:Buu5W\Q..]RNj(bСMzٰa]?OVZEQQӘ#;;;˜6mZ}ߙ'x0eV'<>>f/W;"̙3DGGsbbb(//G__ÇﯜE7,bKXVy*Yw,iMii)ΝkqIHH eNwww<<>>lڴ Dt}CI I>ݺљ<ވ#022R9PK}}=iii<^rr2PQQC i1ۣNL߉lif+EטBHQQ2h>6О033kunGGGy&:ukTg[~Ey₳s9S466rEes(RRR_GŨQ"gggu !D={gϒ 
yĉϝ;/|I䪢KKK>C-[v!MG8;;bŊVKABCa\HJWWt;W^%::Z'11Zz^^^+r0hРU֒b,118RRRhllD__wwwewhooo|||dKG-ZRǖfBܹK\rT;999\rd ==4222(((Pkhh#vvv888`cc5bii P qJ&77,rrr!##4233y4hvvv3h qrrUN#00Ԏsۖ.]J||. +++ZT233 55 233!77b+Ԏ$B1[?;;EHJJ >}F'_[YYakk+333eUL%%%䐟O~~>yyydff*EYYYdfft_?mllppp]O^U|V1m4M|$fdd$7o===ZtÃ~L]mIN;XYY.nnn2w\|}}5j]۷oDFFua۷ogR "D((([[VUՅGO>W_ͮu]ۊի$''sQөU;`lllpttTDlmmbEݚϧ4 h4h4*;ٳ[,055UuӦM_̙3=Z8BV<<ɓjmP!Fk0h%??E2n ###kLLLz܄PwQVVFiir]ZZJqq1eeeJ*[~h?~-sss,--___,,,VVV`aa!;uq:::<쳬Yz.6mڵW_}U8mXZZr!)Dرc۶m ::: <ccch4;w===m!<4qk Epeff*syYYY)2w7a„ `# !kkkƍUUUh4pEOvvvyJXR;vbSSSLMM111Qn_|v!L)Æ jmQSSRN{R3--H233)))iy:::-&,,,333(uʢeaKQQ)KRLLLmmm{{{cS-!:E,u^@8K42t||ii)Ja测 !>>^wQQMMM)"x׏}ҧOLLL ߿?zzzG144f@cc#%%%PYYIyy9uuuSWWGYYUUUTWWSVVW[[Kyy9جyHeddŘ0h ߯ IaPϲ|r|M>S^|E]]].\ȗ_~٥ Bz͌38pVR;?k͍֬x@(Btz߿{tuu6m7oo\TTeee)dffrHڱuͯ.k`mmm9E͋KJJkm.ȨE͈#-cDuS\rv$!044WWW8<… TUUɔ)S?~<-:D瓓C^^ Zzhoзo_LLLӧ2j``@TTTPZZJuu5唕)TWWSZZJEE555{4]]]}իʹرc[󷴴T9kіǧ|2qqq$&&Ǒ#G裏nܖxxxk mllx:Bq4 L a~Fӓ/m`ڵTUUo}G0w\-\r޽{3hРV=X[òen,_=dիs̵oQ&;r_K^BBBˆvӧPfejjJ^n9655Q\\|+**ɵZ*++Ň]F___99-v8p -N_xl2^}Uo*PG^ b׮]V;z"/'ījG>\)((++SN5/4hB^Jyy2~())қڢEHFFF;얞kp/8څtʢDmq~)-++ 1zzz~ajjC.FFF8y!vc^",,[Ο?̞=OOؘGyM6wwz>/TA]g5g{1JJJlǖ$..;vBh!n ??<ӦMO>a033տ㍍vh޽6;;6ӎk-hURRBcc2^)׭ӧu/dСN7NםW )DE8Cc!=p1`L2 60k֬.\- Xrr2%%%TWW+UUUՎ7<2ym󫷪VӎQ痵Nov^BCް755FBB\tWIjj;f``p]H‘=XyyyBk?} ggg Ƃ GGǛ^'nӧزe T;Nd˖-<ʚ!ĝILLSnMh.=>8D8ڝJJKKR*++[d\[Q]]\UU'ӚӞmmI?bB(څ][%VvEeΝ]tx֬YM'S-<< &p9fݺu+O>dOJp)MT;J;q[ IDATEEEup B8h57ZT\EEo.v4'boE{W^"CSSSeѠb@dd$!!!ڵl\\\ $88`̛7O8ϟmwcmmͮ]X`A_ 66KAddd{nBBBŜ :*BhmtCC2uB`mR]]ܚZ1卮hfSv)"` oGՎ${1Y*++9qaaaqttt;v,AAAL>QFuoNk6/|^T{nhu\ 7HPK;k;ohc&G;|nUEZ;wmCCill$33묬,屖8;;3h lmmqtt{{{BV[[t@ѐNff&iii$''ٳgKQGP$-- FCZZ&\z ;;;pppP FDBR[q]Gff&999Jѩ.VVVJuѰh 7o󸸸CUTTܹst YG?1cưqVѽ\ m,Zv!:Zm;CFF<jp,^>C^yRSS۬U,0mRNkbqXll,!!!|W$$$`aaٳ fٿs뮻8x 3gT;m)--5kK/q;z7|6?"pB9c7(E Jg3g*ӄm&MֲxkBtRr^JXX~f̘3>}:jBNl[,|2/_FP\\/mllKKK,,,nqJGD***!''F BDF nٹs'ׯ'66@^yԎ)~z֮]Kbb"w|֭[?^@Yd+WuVV^Վ#hE_6o`ڑ=//yyy8p/_~; J7Yff͚.[ܹsy|UVQQQ!u3ge~;EDD0p@̙իD߱%8@Q8v$!Mz!ON@@jGBN$#668^JSS 2///OӧmllE]PP@RRRZwޘabb1FFF###LMM122j1333v166ԔKSqSjkk)//R)++"vYYeeeXii)%%%:::cnn Gn0`{V;"`=租~"""B泅hcNbРAZL,BDT_>>`k F.!<<׳~ {-Z#wyP-44{w={0o޼;:8,Dh L@}~G!ݻ={FCC'Of޼y̝;M: uh߿[8… 9;vpaooҥK2^mp))Nll,!!!g&889s9!D*X|L_K!D;IB:t$ @ @HLL$>>X咒BSSӓCͰaAmEHaa!ŷ8FK郑Ƙ)zzz ߿?zzzCCC ǘ zԴc=zJJJ*)//b()) 1eeeS\\L}}RQ^^jw@"$SSSC[abb11j(|Al٢vwƏϛo+v!:\{̝;>}k׮>F BDu8L~ j8s ֭cݸzj/^ܣZAw׷e˖~Ο?G{3!O4ьa !jBEii)gϞ=8p F;w.^^^jG(88.^e>(Ǐ'!!wSkADDDS.::0k,&E _} 8880|]PA,f1G8z/I!D;!ɄۗQFIB";;x넄Ӂ_<<HݼPNnߌ zׯ}U tuu133CWW###LLLZz\ە7ARWWǤI(//'** CCC#uLƍ>Xr%Vc Dto>QQR+-INNfƍlݺcccz)}Yvv^JKK֖^4' ̄h_G9yc4#| '44pvv&((Hv!xzzo/ddd{dz>{GRkƌ3pCmcx͙3N8v!dǎ\t GGG͛Gpp0F qԳ]cڑBtRD||<O=zL틟'Ofu]w9Btu8bcc#99 . 
///Ӌk]ayAI"@iiMnMcc#%%%fs-<[166V~ovnkۼCE䥗^_ѸC1qDjjjLHB=233ȑ#L<{C1/ѥqF^}U>#6o̺uyWdgvfll7|>,[nU;p_G!c``ә>}:|gϞ%44M61p@̙Cpp03g';^~e֮]ˢEU;-駟fڵ<#M6'at{BgǦ7u{N~x2^ 6P[[qSHKKDEEǀ1bv!RR>9qqs9{S!z ^||B9}4?SSS Ϙ1cG'>>#11TS:}xxx鉇2'DGG3qD}Y֯_vUZZ=åK8t#FP;j{H~_}e2 jKklldlڴ0FSO=#{9speΞ=KՎs4i>}]]]#ݲz5ߑLB}}=ǏW;i^BVV^^^B_WM5Y͇|}VbڱBtr#))SNETT111bnnΘ1c7n;VvBt[EEE$%%qe˗),,@__ggg \ !PEUU&M'Obbbv ȏ?HTTjG0۶mcŊsNՎ$D^!۶m'~c D,;w¢Ey3v˗qttd2{lԎ#ٳ w^Ԏs[|M{=Ο?%'͛G~~>jGv&O>R;|W(E 1Kz}ְQKĕ7BYQ\\ŋV.455acc/+τBaa!W^mG^^KчǠAd'a!=ʕ+裏8x SLQ;N(((駟&$$^zKhG}49򛏕/f s窝Fn+::7sNxxꩧ4hѺcǎq}akk7|ڑ& (0DHB.8s {a޽\xHPPf͢j}Q8籶V;-LJ?t㈈8r'OV;NqafΜɱcǘ0aqDԼdΝ*E -MBM4Xe.o/Co!Po-h4DGGs咑>>>닯/]!RUUYYYvr x]100P!/ :;vW;Nػw/O=:::|̘1CHBtJQR__ Vo>^ BDSm9cƨFn-;;?͛7h:u*?<2o<222ϹՎ$?T3IPINbڑ]|={SL{%((;;;#v;1Ç7tɱ ;v,x7Ԏs˦N ?rcĉ׏E 555:t[JJJ"~!C[jֱwx>wc !袴 : F\\.\… ;wϓ R-0`хBjj*)))\zRRR<''.899Ii!ҶmXt)|/q]||<+W,Y7bj*k_(nwe\]8yN$DW[[ݻٲe xyyŋcnnv.~/_|w}}}}c !,f?eR!DRPP?Hhh({/// ߿K/tFǎcԩ[_~Y8e˖-<:tiӦhOL8Q8]3g'Odرj\uu5&$${R^^ΨQ dɒ%75.|"`9Ѡa-kyMoc !bҔ#GpҨC__/// ÕY%쪫h4vh4dee)533Sz`kk]:C !q[.\ȟg֬Yvvɻ֭[:t(6m" @XBtzQY9nr{ IDATosW7=HLL ?عs'̟?SһU۶mgݝO?ÇI<&6G!TQ]]Mxx8|dddٳ d̙G]_W?o>fϞv۲pB"""Z8dv.oܸqXXXvMUUUFHH| ?`>f%DG|'a#B!~]zz:G||<.\A舮.Ç[[[ttd!DRWWGaa!SPPvnn.MLL ,--8p XZZJ!׿2uTq㴛?1wSNMI̘169^CC\?79R"ztn((N$DR]]Mhh([n~Ύ~+Vv.ҥK,]_~^{ CCCc ",wN/Ͳge߾}r :u*AAA̝;+++#v9MMM,Z0N:ڑnYQQ>>>xzz.5|QLѣG4iq}ĩS뮻Ԏ#E {졲R)Q;Qf6/,bڱBt" (⏄JKK ΰa6l&&&444(B5ԐEVV$//Oy>boo-8::ȠA000P !]~6n-79?믿Ғgy{L66߉#G0uT.]Đ!Cns Drxػ~֯9,DKLL>>#??SO2w\Վ%455UVallkƲeU;=.v%|NBqABCC9|07 ^<==ՎeTTT0qD*++\HɓL4UVo̚5"T1Kg2fسgQD7PYYc㢺dY}AP}oPq#HfiĤɽI/m^M>6M&51Mk!q%%*( (Ⱦð30c#(&jy8g 1ǜIMMet:fΜIJJ >D$Nv_%c~Oo !f0(++#''\e{eU|wwwQ111BFS__OUUEEETVV^_RRhOOO C\,Bo{[_kUhZ6m9qSL^ CO}Q˹agΜaҤI|>^nNˆ رcw}?dj'裩 ?f͚~B諳xgS0d*yԮL_["III' 44T򆼂֮]˦M G??prPCe< ?!mjFcǎ9<$&&ҥKeu;wsxb6n8,W|饗_5kܰ#GpaQKoo/'O&<Dr0jٱcݻѨ@~a.QqeZ֒N: X]BAj.EEE~qq1===XYYt #::I&vB\gg'UUUv]GXXAAA B^?^{5x>쿏lhh`lٲݻwܹsy'Xz5...j(g~_PQQqSb z=lAz:ʕs]#NOObÆ |駴p=#pvCZQQo>#55={`ii@V\)B܅f/ֲE,R4!@aSVh4:}T^q@=JKKVtFAB!0'|»Ç '駟GnJee%ǎ###L"44ŋxb.\( ) 1|s=?~Sc%"čc࣏ - .]ѣabX̴UJ!F>3>كIII!X~YjF!_!7?gܹ HLL$11EvOy衇xx7%%%̙3݋%}ƍǒ%KXn y6mG!;; &]bٹs'޽kkk.\HJJ VY6>C"I$y.M!7hiiŋ\x|e+DEEرc"""_bꢦr~=̝uFMB!]tnʧ~J[[+V੧bѢEâWcc#gϞdffAII ?ٳg3k,fϞMxx !nQbb">|+!nUQÎotuAX̙s’%vBlڴ7_Yr% ٩]␤jO/puueZŋcoovB'8JiLa /)`BCZZŋ̛7 &6,~!=ǎcj3dDŽ ذa!]v FtO]>{<+BQj&.%a~XYYJDDʈWB^Omm-eeeJGuu5uuu퍏AAAh4~m**!B)ڵ-[k.ژ:uPNnn.gϞ%''gr9`ƌ̚5Yf1}tY@IĩS2e ;wdҥ7x q;p?DD"3f̙0~M||>+Wb .NGAA*Ӄ=L0bcc_,Fuӏ@22L S@aS8d <BBԮVZFGww73g$99d=jmƖ-[8pűxb/^̜9sM*}]J)e! ygXrl=!Fl%rI={6IIICg~ٴiӰb,[G}u`wҙ3g:u*o&> 9F3}t]PAii)[l!55 \]]nN{/j(DyOO=˽X AJ!F#eee&瓟OQQ0رc;v,ʐЇj*;***<̷wѣN_sw,"BWTTٿ?777.\RZZJYY_\\LAA!ƌCxx8cƌ!22Xd1!Fs˧~ʪUn9$"ĝRTG@V= `4 L` L112Zp֭ڵLŠ+X|9C~r::: ==tppp`$$$ɓNAC:ֱ-8D2ɤRb% !İSRRž={HOOgݴMrr2 .$%%LZZjtSvիy>?OoMnnt>'|"##.G!%%%lݺU DJJ K,5&6I&}S***">>S!ҥKݻ{~ b,\sl 28 b5y>3gJ@d:uGr222hllɉ'2w\̙üyK(!n\r&lqđ,$$IKBaIղg>3vM}}=$&&r?lW:g_ox.馘C! |Gݻws.GU(.\Ⱥu.GF999FVV,[-[&1r|lf33i#4N:$Id˘t yBqF#_}};w$;;;;;,YҥKR̛xuvI7,##DL͛P~hnnfĉűuVQջ?O^^;!?@bb"IIIbС#tlZ&1yxPB wzEE͍Pe>Bs!JZZZ=???___|}}X-B5Kvv6N";;BzzzpwwgҤIǤIϖkkk=.]DAA7^7FOOO\\\pqq imm. zo˿;8::F́]ooo6 qw`ɓ'Ka) a̘1B"ӧOFv("g % 2Ɍbڥ !İb4&==t:DOOqqq$$$4'JWTTCq |M~iKa$&&RWWg}F\\% 襗^;Nb̘1jsǽ;v9&C 7n$?? 
V\IJJ sa(y(,\v^0X`Y% !)//0am>`#,,`~@aꨫSx;y*777{:yWzR&۫ B!Ƹ^ NGQQՔS]]M}}=Zn‚QFa4P:v| C۝pqqooo<== {{{ܰ GGGlllpwwGGGU4^Omm-UUUJHZ((((_ɰ0"""B Aׯg/`ٷ9%"A"PZ Wf F`94 Cx“cǎIff&NB@ll,&MbL4 &H*s $2~xbcc?~vB +}"GM41aӘ-j*JCC'==]vQVV''!!{˼Fww7?yy'x7q& hZVZӧٲe jt.O3Qz=$%%[o]Ȇ ((( $$˗KDvd(OLWW$''w!NKK'ٙ>9s] g֭/]Nrrr:u**?O._O~.^H@@AZZ| ,!!r=p|2H"Ә%#'({UWWSZZJii5eJJJLFh+B\VKUU__SSCOOX[[[F;~~~h4ݯG@@ G!jmmV`M X=MkkM3?71wpppe8::HccPN8;;θq㈈ 22tP^^NEE_^^NYY{.kkkuN#`se{+///ƍGTT1112qDeNE1o<ꫯm-!m BqHiiF7H`H@)0o L:w)H~~>Nɓʶ KKK"""pCLL Ǐ'<<+++KR8wgϞUttt`aaAXX0aGdp#(F1qd&ӘT2X3|ګ !::: ==;v#f"))+VvONW^ᩧ/ːkk_2qDt:V^͟'}C |'TVVFRR)))̝;W$\ 4 YH",e)>]B^Oyy9TUU)=Mrwwy`aabd顾0w먯~= CR=džKwT!Bܘ^ h}Z[[1 XYYꊭ-JHﱾa 1kkk\\\Ns@*xs}cƌ!""Ǝ/666t:Vii)r~շb 4Ĕ畠ٳgyU"DDD`i)m=|'|WDDD@jl))1u)/7m+*LMa@S@3H́i-".]ɓdggٳg)**[[[fJX$$$DVlJee%YYY撓Cnn.ΝSNl5 111쇅\CS=d\|ɗ$-`-ȴ˗Le@8BoRPP={ؽ{7H/^̢EWmbBoo/֭?1cǎ>`rwKll,~)j0̟?fۺPoBYk0;vTRSS"::x.Q1(G8Qv2pđY"eN1ik}'%+$$P*-I}}=uuu*!~A),G OOOe v zxzzʄC!bAWW--- 7qNII %%%:!ϕωBCC;IzF/H``F^w Μ9ӧ9}4gΜ!??n퉉aĉL8)S'B܄{5kְm6oK D1ttB#զm߰H78;_a442sqQu -tvv*sH$77ԪqܸqL0$((HV3룫 Bȑ#ٳ}qi9s&-bѢEL6펯sEx xV9s+We˖!ՍҥKL<իW]Πt??j3b |TWW+!|qƩ]6&tbqfs\wawB #??V>zhh,$ DRUUEee%ZV׻jh4ݕ6u///nF!Dzcƍvvv䄵5nnnX[[<1s@뎙} jʔGqqqFy/eootttPSSCUUeee:ߓ C,=ybz=>.^HKKro"Ņ;nGMM2`;Bh@[[0w0.1455):q#QnnnXXX(0{{{ptt,--qssWWWqrrR;Px|Lܘjjj y+a\\\ppp‚t:]]]@e}Vޟ ω'q):::'.._'(9#B~~>cҤIl߾}P;H D!jM*VjMupsS5h4r%9tp555ikB ᄄ`kk:),,T"}mG!!!x{{*PG)B"dsKҋ=DM&M4]B Fl% r!F#&MR̛7o޷뿰wO<Đo~777RSS0a%ҥK'++ ~u:666C{/{~_SXX(wNc߾}}vȣ>ʘ1c.Q-URIY(G8BYӘ0a@]GGeeeʤ eRSUUeeeTWWcΎ@LPPrL&ƙ'666@}}=+m]]]OOOOOO<==KnB!pj j;^Cmmm 1_0?F_0w05n1 w;G/\kޯwݭ<;;;3^F퍿?>>>JgFFGah4/$riprrbB"cƌ7 q3 HHH@ѐ>]o%"B3u:0w"7+SOOSOO2u"r@w@SSn(,,0 ꢢx-+]B^hhvr!GٖS .xC GQrB6233HVV̞=[ L,6l`͚57QNȶmhkkc֬Y$''BXX% !nQ7ݜ8.q K,$R ~La D3 @q멬Ld}'燿?)AsWW$M;}WfvvvqۄB;AKSS===477_D)AF#Z^F888`kk.\]]upqq&q+15Kmm-TVVRSSCuu5UUUTWWSRRBuu5555ttt{0 ^^^*!ܾ!___ `0pY?Drrr0 1uTMƌ31c| gϲxbٽ{7G)!ڮ ՙՙB$p`iaG_7Hy/ RXXHQQa4"<<0Fpp"&: QZZ ~!\r9Y0q$("$h"$(2X!STTľ}طoGǢEXp! .mBO222xy-=/~믿ywU%33{_W?eӦMɓ'x1effrqvv#?W!B1!W$HCUG\hxz:W kzk#%%%Qh4E٩jo (N m/mqmqj/yq\cĈ%t$pu#hĉ۷t;^'** `ˆ xikk_G?C8~1}QX~=IIIoK/ @i׮],]T`0`gg7|XZZ/#+M:::IMMe˖-tvv2sLRRRxh4j(Ats J$'&6ڰƚg,fA% !(NGeeuINfe2aaaPy7u;^s}]׎ u !p]6a4iii|JG sFCy0w0o s üuqqR h1jԨB H}}2`GMM Z&=‚kxxxF!00ooo<==G%!f+233ꫯ/ښ'*Df̘رc.W֭[yǘ?> )![46#FKFF2o]\ybXjS\\LII .]RinnV70B``  .3| IDATQ\\o%r());@@@AAAʿk@@tow.# /H>cݰÎ1/搈y? ,w2228rGСC HHH !!ŋzK[ogggCvӦ&y6n?W;Ν;qsY8yd/{ٺuoliiXYYݡʆ!͛7ȃ>(х&<\Nq:ƚ"rYNavu.{E<<٧իW~9YZZRVV6$;Bݻ%K|_Ww᧩۷ʾ}VB ?0j(8I:01!QIzzzZY;Ņ3 Q q~me]pёѣGѣT۾;b@9a8KF G.Wwƍ:\MeCj\xS]]M]]UUU)sXXX`gg#NNNNPPaaa/닅zBSN 1j(Ǝ/ {'狑{ݻw[ofU@Bk !7o,8v0Ɉѡ˯QZZJ[[r77a)_ʊwjZUUl+**{kooO@@FQSjb(,b\N4h!暰H$8!4!V[[ˡCHOOȑ#booɓ;w. s=7yWضmӦMW_ȶmx>,>KKK^|E?޶[QQ%KϿּk˷.o6/€AW_}_wOղcRSSٻw/Fb޼y$%%#vB>zAr}G+Id@`l]؎!vR8Ƶ)rqI^'\ 1+e$6`AHlB u֞O鞞iFTY{tk49g?/7xyx% 1\Eْ:(KB1>4cttJzYl 200=b ix#Ҙ kp:Z<@tXd |0TBJP *YCfR34dִ֔lLRarr!lrxxQurG" `bNVU T@@t100+tcH$˥CcbllIپ};T E]%\͛KXjbﮰxǹ{\z饋/" 'è!3 #3$8T^i}$dH$ZiHb4zYb000$6,[h4K#}Z  U~٪,[>}[n Ie/o&{b 838Ub5Y*V,ǁ{A8'GСC~6oެD.B,ˌ?c׮]r-{l޼[n']̝{/~~nfnƦ| _X۰awuUV[ouoo)yNaX۸馛aώ> ^xἻrONNr}i jrWp5rSN&Ǯ2/& jV1lbkYB)KU><<ЌkaƪX]NvŢ4ĜD"A:v;Hp8<СS @`>A|$t:M\>fCs%_ |0T2*ViJP†. H$E %rptiRI$ ءfivO0$C__N3\z5hn%MEZꫯ}vm3sw׷$B pjJA"qtEi|sJ̏`BCO)bbÑ#Gf̝)ѨFw'bJ%FGGgd#G0<(7poVnf~.k.$W_}59 b;3"c_WnF ~z!l6@>IIAXDJxn^9v[' 1˹l`p=, p11442HFYr '%G122Ҷ˳힓^ANZR1ZS9b*uC0 FGG9x>22LNNL&Ir&c&V+vӉ  BtuuiCEX>[f<ر۷}vz-<&I /. 
'?8_җ8pwN" tv|~m`,@xf^g^It\.kA`(iD]h9|0TyFhE-[ש+b19訖EFFF4r(y.K"T5Opr'>1 4ɛ$*8a keAִ "NAQ]u~#uV$h.+~\yO<7y衇袋k>-r~?.>϶-l|>{9:c~r]w׾5](Њꫯ?1BҚpbXX,u]\g ޽{?Ξ={(222w6p=~> TA8dZcx1b{K nܜ˹yld#HA8d2 366э_]gr LK]|400@__ *0>CAN< , %f(CmT…-`*iCIJPSMR0 6M*CS%:tHT*E:IL\.G>T*QTڞAfpvz ]]]tww郃XիWl2IA@,cرC"v6l_;.b6l iÇ曹>??f͚-! 0Z=i$zR*:5c*u$)$iP_hPuY\V7]8nw[YD% 4~Ś>Qn7]s\ZQiӽr#S8)8A753 ֩"*eD%`cE~AX2;w "O<B5kpWpWp{X~=_W'???1gug>YSET*řgČ7v;Vg]0!urro||jNCCCC<LywsW/-ԧ>o*x .2~׿5op:\~\s5in/'ayWy^5r5888sF,lHA j5bׁԴaMWEr9V{‰ ˑL&#H豚6k:9g: PEkLJh-d s4tD %^(,Z[(CIJPLI3*C%8p#GDuzrrR)0 2JcJԓ:l6NI0'uD"Աj*['\y otuu/}n.by-}l۶M߸_hnFmRnzg/[bZ{z|_^:&&&nb{'У\.s-p-`X~gy=SO=W_[ĽI29> c/c"Yp.粁 a ؀!'Bxaty^um=D"ZqzR),(rYxP۩Op8L(ҏvBGeg> Ad %hGP-Z0ڥo%[(QUhMĀ)BIJPJаlA`*=C !\PUF:&5%sLNNZ63"RiVBN |ZD">:[l+V"u T*vޭSDy^~eJZٴi˖-[]t:mFRo+_ RAA802\#E$)3::qnQJl)fA]t2/;]oOLLhiU 'GPiZf^. HCb!ra9a2S7z%J`dhG_AL&ömt?f+`ӦM;x'ޔ.x/{Iۻq7%& 2JVbG: ptciAd߾}x<6oތP(tLkя~`i b;m۶SOq\.ry? `P(aPxټy~*:9N̄fOO|lݺw]ڧzƮf{1=C*8^&o6{ًAkY:q6gs簾1x.C.#LQLJ|յ$U0Ooo/M%̙ G,<1-p(yBuco'f(yB%bt3̉*Y^wJ0K*Bd %]$[њ~ D" ˑfIR111$LLL5%zd٦ԣbH\sZfnt:q\|>^/>߯eu_AxL&yxyyyW)?|6mڤ6lDz}{$ q 7pM7յػ6'DAd S)H&|*ռ]<>5HL&N'C0XH|8yq0X_՟񜖂Iv] v"Ccǻ|>T8aNi-PZoP|&1,$s:IWWD FG8Ҕ.2G8!10c飏hc裏e,ku<A8x뭷oo[}QFGGn.>MLLϲcmۦvS,Vl6n6!#͒I&\L&) d24BT*T266믿0}}}XBT*Y,TQhJT*~UH:a}S7 r|>n7PXoj~_]N'7x#z ,5b72aY|.粁 1 Yӯ1  ZkH,#5 өOѨFT+7*VJӖ%I8DB?O\ ͫeѺN ӕN{gTvbFkF'1A%bTiv3Ti25?[ ©L6)d:fddX,d2e:0 ahcIV͆=>@ @$! DꢧA}3 jAA8Uڵ^x^x]va>7򶷽7qFV^-GNg??Oyy΍7ȵ^^ݛ" prN"J$1%D}ܺ,$Q_ .BG'%@Tj*P#JpV|hۧNK%ꢫK$DbdNi]6666bkrU7 i 9( 1HcfQFgyA 2e~e1DO?}-;d⮻s4AtBQ,΁8Ν;ٳg{y|>7o&H袶D"A\gMRf#HDZss!LhN iaFK*έUSH,˨ :a.S3-S%تߕT*_ϯ~y 044$Ӗ xKl oEZg0ȹ t&3999kbZ֮\LZGPUܘ䡺#tQ_ (o;SNHsIPbF4NbzҚjaIh3ڥit3Z_Wt$2\N_SiSZUF&!J%Jn&3_l6.KkJJTn{EWW=AeK/Į]صk ߑ6l$F~oa IDATwytww//{n@Zƙi<:[:'C 34Ӈ9L."D2`8BDO`A't /̫8fFO~6Ca 7JrU7Z[zM@bŊm`0f"~UeNZQR"NenX,bXZj5-yj< \rq{Vch?8BD D91J *APtJpiY+HdVC-[||_¨s"shӶ3zΜiy",,R- 1T њI8vJ8T Ih s*mWfdfJ.B)Z8Nvx^/`~~+Wt.{  ,T_~_~]v_׳n:֮]9묳:  y|A~vڅ窫SD !  GC<T*I6[W%1rbust.WOAd4P)Ot֟d2-4bI;ѤiYbh:- .dhwӿC+ 8ZqGv5e*SEnMH1Hҗa)on:v;Zmք3f>l8CCC,z{{uJ1顯iYww7pxI\<QJSxX,(@6MT9Nn7̦MXlhe˖6©O8>s}a?Wkvzgaәd'F zl6WiyM0GerY"WPۥ(T|ōeۿ FT 1T %fK$fם/Jk3ڥi$f{]AaaQ_*"$T*$cccdY}0 \|>O:P(P(0 rL\> _WR2Jp<>P(ԔMWW===tuuz:C 0/2*:oVaXzD֯_ڵkYz5+Vr-[8a$ oO"HP__d2|[҅A󥻻˗|rXlGQYrVȑ#Çxtt  j*V^իUXj sZ<8ϗܸ$pgB:F KBVMh8!B.] ډҵԠ]裝W0wfa^&WKU SEex,SuCT uo.JhIh z~eNHLLLdX,aim2X,jiT*T*T*yZl6\.Nxן M&`}Y|oRz4g>MN{{{l'NH"hz$Iɤl҈D"mx<'S5C,2#hqdQ$5,{衏>z葮؂@LLL믳g^ynֶ9=== j*֮]˪U8ذaÒ= ''cccr!>́ؿb׫e38>uֱn:V\j]w# AN3u>% ӰztѵF]_%1FS\.G6%NL&f0Jd2I:DL&N8 Y"AY͇a~?>@ SBBN.jDZ*>]eYhhP) %P2Ih! 'Խd2$\q"x\'iJy rBecjjQCI. 
I t xަs>݄B!\.7\.>  ,b0{e޽߿rb3`ӦM\p\p\tED㺏'"  FTH)IPK#t}:K%|֧2J:]N&dzrH8\HlQ`YFqۛeA_VB*"7] ORMӭCTjB0lnM0WK59c.NkMb3~޳t{g颺hlj3̰nbh@!B(a HX֩fϞ=£aݺu|r֯_Ϲ瞫SN.5MXcݻ_X,QJY{tr8!09j>{jUP ICN NiZ%vb@^oSJGww~KP}b}4鉉JأӼyZ8UpZVuB|cYv$`J~8ef X 63iCr9&''󌏏clVo(JATҢBj(l6>l6vyJkRBYѲF(ҒrIʆ  f$bcccňbO;W BvBNT|~ѱ1j꺏088 k֬aڵ]:kJDAANLbO溬ɺ2,dm5l6JQ.X$T@ŲTJf,n$($ Gj;5\Y1gJ(I&r?o\vTۨTe[pj%c3Ĉ1T y~ J4 JJE%jnz|> ΥzlۉD"||LAAZM!JeRL&ϗ=aX55V} [FtuIl"  DNLLBRO@)OrP/+IOϛ J Q% lufHIJV#uH $պ`R,4 LT6K&/fADIb&sTS%U "yY:E-TJǘ:\%s4/ oL N~$g\ "fdiRThӢH=tu"Dr,_j< ;w䥗^bΝ:`Ŋwywy{]?NZrAΝ;ٹs'vbϞ=en76l/ /[@C 1(9#Ca"Srw0Xr%JV%ڴ~,X #f{,<[JLIh_und2y=mn,tBu x<|>_t(vIB|%s,3M;z/2u3h0椕X: /wccc ) LNNR(H$E$bt:MT" ČJB\ZR.DP)6 ݎcӉtxp\),hI# t: Bl6>?AA@AAA8`RԟS*J8fI*U&;aqߏawĭVN^/*rxHZŨT,ryq\Da<us.r!nő)FuTl&TDTeJ8ie6~:B'&$̲K&$NI``L9MH73#(d23<öm#xӕa) +ڵK XJr:lڴ͛7yfwr98yIqf'V+BV#^Htۉxxκlitu7:cy|>>n< HHWWSdŸN{gZ Xfb>@[IQG&h>Cf1}n"!LXմ <#wc۶mTU8 w .a)oO3OsN2h͛7w-[qOqR \p^hȑcQFaqb# 1cC1 0 2GYN}!5dVC ǣU53IC*WZ:Lg2-q]i^V,fd2] 9_y<pӴiIZM{<sFPKf dRN9]CoVՎJٴls1'QF VP.\.^/@@*BV2! 3a>nadY$l\.䤖2*>GQKtZKZl6KZP(P*T* 0'eiu|@=b|>\.G'^>Q ">[V}|H   ,EDAAةI&sC]6i3LN*Hbd*2Jx*EZ%ˑV.,՘βnFt {xzǃã j^<4 _#$ۋ?M &00$.RE6*$\.G6%ڮK$&ZשC*D"x^^/`P߰|ׅa|>_:~?O4yd! 0,nl? G۹QZaOuV^z%x;x;ߩ% \g}~~'| ٲe [l+^D{E^ĉ"le [thJmڌwSG("ӖE!H:nJd2 dU׎dep8L8K:N'ɶ˔ J(R)-[688N9A㻢m^t:w5MZX,n7Z $B>%XEvǺ~>۪AAND"A:P(J( \PbRE|>O>P(PTT*J%T*j5-B.t2j Vݎnpp8p\l6=vߪk~˅=|4=Na,n%  P"  ©E0FHnbD6H`''1J% B#(Ir*bdBT¨HU*MJ'b!hZlv is:8m6"(pz>`p #!B8  pnw}bU'D"őv"dl6;:mNS"^M8r @ Dža}1 v.`0(EBGAq$n[VWqH"DeZO{~e\\ʍG8Yϡ!xx sa˖-\y啼T*yn?O?MT\s ]tќ^$ʿߌ2+*z ?g\uW1C11=) M#J~:s,{jY,xjNkhM3?N=ڠ "UC<r0SFTjl6|>NH$!|>l6~Átjp4Cl6jŢ02v%8$ 2A\.8r9}Ha>J%=]Vre*N*:"SըVMQ:cXlMbtbZyjvcq:\.v>֪T u=Tɥkjyww7VU'Q1[AAAXZ"  p4RPfɌJ1&'"D,ϓM&If |T&C\&͒/0E҅JdHZ%W.V)p! n,sX,2͆"tv:].>!w8f#Յ$N'\@#8f;TAY;Y$ˑdH&Ӟ 3N}\NwS7g#4#ӝ&y$`0ۺ\.B+ɓ?&F'aMyڼμuK C|+Vjn|38c?3}<>qWe/_oCL&c=֭[طok׮kcÆ mo=ǝI1Á۸ g1ׂ(m-o.\C/ bG *9C%n:Ty;QTھ넍@ 'ND"M@@/[\.;VeJeNlyvx<V.Ft\8=mXX,vժ^]*9tXԒ* Ѕh l6l6v҃bхL]B;L0%Twlá[ ZM" 0 X6\.JB&t:MTj.* ah0 N(J:œtQ(Emtk06 NͦJfa7p8ltuueNOO`0PUk    oDAAA8Ij$ " dK8rhsn&oP YBD>OT"S,+(PgmXZ` iPT&׋n'`sX^"N'P1=Ĝv ZOAx5YZQTL$Iu]?S LJ;vpws=p!6nu]ǟ388ȓ<7&)vy.\qS8-ʋz觟^zi }AK?Ϸ.'̝%mw|8ڥn.iUĿbK5V)+vRaxx N盤l6KT?+Q*rT*|(1J6P"jViZyUy,%YXVBz[;,ada 47KAQ}m\z<6UctXR:feժ)vJrLXZRVtVyZ,P&-AT @'oY,-S8\..Kzi߯1TʅӂFt*9    "  pH$tQ"L&)i]ȗL&Vqj*z!d@!Jr9||@.P*)A>[,vjRb] j;$+,tT@  &aԅ@ U9Dmtօprd+2)l$Yrifd2J%.quя~TS @=/wRSX(ѵf6>3%xTzy뎻x衇+;fÜdaZxfr9ġNRM x<|>PHzD"zZus;ʰN###y;v<ZjW=2 JEbnتsꔭ9UgRх3%,$bBJłLUx a.lWhPCANNJXL3 z0r=enrrRoq@=J>*0ϵZMK>.\tz:6+BU(P%LjYdPK5 ǣ&NptV rvkcX|V>r:n6    Z"   Pf}T\x,FPh)%Qj͆nmZPմB\ON)qjx*r}63i] ٰ7j Vrԋv;ZRD鬋=Aj'j]T)ITS)Br^ϓ(1 9wn-\syl|2!CICDӼy}uY:,A94cF1>1 ~n>h4ǖ-[x{7;B_ &7~a,[œBXk?c޽G??.?͏64ZϏ:n̴m*UXh=>G"\..D\.|>GnIVp`۵4QTDSRJP].j5"bZGxbO`A$vE[ Ӊj2Ud04\.$9҆siZ5AN=TB0 FGGMDPfVӲC.bQKf)B%FA]0 C1UŦ*¼)u 5Шc9Jմ҅JfҧԴΠSsT===F"nJbAAAA8Q"   ,YtbIX,fuFt:T&KLj38.f QPv<Áb!hCFZZ6."6xV\*r Z G07~JR1)GzJVp N2i-TJ8Qc^=OuմZB( xQBwqK5PJ|Aa TaO/,J*Qx<VZe]WU֭[w ?>輟gXKӱ>o yhgwvͦM'?̛oɷmn0s9/k;ۿy7r[O~lܸq|y'woagW7\o`Æ e6ͳ>g^uU-]tJJ&eɤ>gPjD"[s0fR)JPI\nPn[D*1Al6[SdU҆VE{@uzmZq8Z:Q+" p8|8")Յ륧Noo/~AsS*"^R!c>4J1=_մ`H$c"Jje 5_(|R\.7ͫ $y%6M+Q}5 >UY5vHuUSv6`Pt8}rF(߿^/3AAAAX"   ljPݰ3 AB'jL۫="v<V+ $vc)6 p@nUaV 9P*aV VP.XW*P*XP,*6:T|*}-J6Fkb`?\5[`J\xZղOJ4OCCraz0~?FAمBXVE] *J\QR~_R2_/>6XkVjs\/g>mmSUTqV#ɰw^~_~ロ+rQo>B,ξBa022я~0/Yvmsz!_ z׻p: s}d27 v-֭[G>^+_ /I9w㦛n_O/#<ĉc٩P=zalME_Uh<'NpajtZTUL&CR!P,u'r#AKU7nslD o*jZSѧ[ GaukÁb1QǔDth/4MAŠgfrrR J5 BA}*ZP +NmSV1*1|P<2sB jrt3eu.%O*]BMc„0aZuJԅJ.B(Aw5'ձRwuuawFnw:'i    p#B    J%JEvJTB %Mr x5yBn76rAVVTOFQ`cV 
patroni-1.6.4/docs/index.rst000066400000000000000000000031151361356115100157710ustar00rootroot00000000000000
.. Patroni documentation master file, created by
   sphinx-quickstart on Mon Dec 19 16:54:09 2016.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Introduction
============

Patroni is a template for you to create your own customized, high-availability solution using Python and - for maximum accessibility - a distributed configuration store like `ZooKeeper <https://zookeeper.apache.org/>`__, `etcd <https://etcd.io/>`__, `Consul <https://www.consul.io/>`__ or `Kubernetes <https://kubernetes.io/>`__. Database engineers, DBAs, DevOps engineers, and SREs who are looking to quickly deploy HA PostgreSQL in the datacenter - or anywhere else - will hopefully find it useful.

We call Patroni a "template" because it is far from being a one-size-fits-all or plug-and-play replication system. It will have its own caveats. Use wisely.

There are many ways to run high availability with PostgreSQL; for a list, see the `PostgreSQL Documentation <https://www.postgresql.org/docs/current/high-availability.html>`__.

**Note to Kubernetes users**: Patroni can run natively on top of Kubernetes. Take a look at the :ref:`Kubernetes <kubernetes>` chapter of the Patroni documentation.

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   README
   dynamic_configuration
   rest_api
   ENVIRONMENT
   SETTINGS
   replica_bootstrap
   replication_modes
   pause
   kubernetes
   watchdog
   releases
   CONTRIBUTING

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
patroni-1.6.4/docs/kubernetes.rst000066400000000000000000000055621361356115100170370ustar00rootroot00000000000000
.. _kubernetes:

Using Patroni with Kubernetes
=============================

Patroni can use Kubernetes objects to store the state of the cluster and manage the leader key. That makes it capable of operating Postgres in a Kubernetes environment without any external consistency store; in other words, one doesn't need to run an extra Etcd deployment.
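
For illustration only, a minimal ``kubernetes`` section of a Patroni configuration might look like the following sketch; the namespace, label values and addresses are placeholders, not defaults::

    kubernetes:
      namespace: default        # placeholder
      labels:
        application: patroni    # placeholder label
      use_endpoints: true       # selects the object type, see below
      pod_ip: 10.0.1.2          # placeholder address
      ports:
      - name: postgresql
        port: 5432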

There are two different types of Kubernetes objects Patroni can use to store the leader and the configuration keys; they are selected with the `kubernetes.use_endpoints` option or the `PATRONI_KUBERNETES_USE_ENDPOINTS` environment variable.

Use Endpoints
-------------

Despite the fact that this is the recommended mode, it is turned off by default for compatibility reasons. When it is on, Patroni stores the cluster configuration and the leader key in the `metadata: annotations` fields of the respective `Endpoints` it creates. Changing the leader is safer than when using `ConfigMaps`, since both the annotations containing the leader information and the actual addresses pointing to the running leader pod are updated simultaneously in one go.

Use ConfigMaps
--------------

In this mode, Patroni will create ConfigMaps instead of Endpoints and store keys inside the metadata of those ConfigMaps. Changing the leader takes at least two updates: one to the leader ConfigMap and another to the respective Endpoint.

There are two ways to direct the traffic to the Postgres master:

- use the `callback script `_ provided by Patroni
- configure the Kubernetes Postgres service to use the label selector with the `role_label` (configured in the Patroni configuration).

Note that in some cases, for instance, when running on OpenShift, there is no alternative to using ConfigMaps.

Configuration
-------------

Patroni Kubernetes :ref:`settings ` and :ref:`environment variables ` are described in the general chapters of the documentation.

Examples
--------

- The `kubernetes <https://github.com/zalando/patroni/tree/master/kubernetes>`__ folder of the Patroni repository contains examples of the Docker image, the Kubernetes manifest and the callback script for testing a Patroni Kubernetes setup. Note that in the current state it will not be able to use PersistentVolumes because of permission issues.

- You can find the full-featured Docker image that can use Persistent Volumes in the `Spilo Project <https://github.com/zalando/spilo>`_.

- There is also a `Helm chart `_ to deploy the Spilo image configured with Patroni running using Kubernetes.

- In order to run your database clusters at scale using Patroni and Spilo, take a look at the `postgres-operator <https://github.com/zalando/postgres-operator>`_ project. It implements the operator pattern to manage Spilo clusters.
patroni-1.6.4/docs/pause.rst000066400000000000000000000050701361356115100160010ustar00rootroot00000000000000
.. _pause:

Pause/Resume mode for the cluster
=================================

The goal
--------

Under certain circumstances Patroni needs to temporarily step down from managing the cluster, while still retaining the cluster state in DCS. Possible use cases are uncommon activities on the cluster, such as major version upgrades or corruption recovery. During those activities nodes are often started and stopped for reasons unknown to Patroni, and some nodes can even be temporarily promoted, violating the assumption of running only one master. Therefore, Patroni needs to be able to "detach" from the running cluster, implementing an equivalent of the maintenance mode in Pacemaker.

The implementation
------------------

When Patroni runs in a paused mode, it does not change the state of PostgreSQL, except for the following cases:

- For each node, the member key in DCS is updated with the current information about the cluster. This causes Patroni to run read-only queries on a member node if the member is running.

- For the Postgres master with the leader lock Patroni updates the lock. If the node with the leader lock stops being the master (i.e.
  is demoted manually), Patroni will release the lock instead of promoting the node back.

- Manual unscheduled restart, reinitialize and manual failover are allowed. Manual failover is only allowed if the node to fail over to is specified. In the paused mode, manual failover does not require a running master node.

- If 'parallel' masters are detected by Patroni, it emits a warning, but does not demote the masters without the leader lock.

- If there is no leader lock in the cluster, the running master acquires the lock. If there is more than one master node, then the first master to acquire the lock wins. If there are no masters altogether, Patroni does not try to promote any replicas. There is an exception to this rule: if there is no leader lock because the old master has demoted itself due to a manual promotion, then only the candidate node mentioned in the promotion request may take the leader lock. When the new leader lock is granted (i.e. after promoting a replica manually), Patroni makes sure the replicas that were streaming from the previous leader will switch to the new one.

- When Postgres is stopped, Patroni does not try to start it. When Patroni is stopped, it does not try to stop the Postgres instance it is managing.

User guide
----------

``patronictl`` supports ``pause`` and ``resume`` commands.

One can also issue a ``PATCH`` request to the ``{namespace}/{cluster}/config`` key with ``{"pause": true/false/null}``.
patroni-1.6.4/docs/releases.rst000066400000000000000000002543331361356115100164730ustar00rootroot00000000000000
.. _releases:

Release notes
=============

Version 1.6.4
-------------

**New features**

- Implemented ``--wait`` option for ``patronictl reinit`` (Igor Yanchenko)

  ``patronictl`` will wait for ``reinit`` to finish if the ``--wait`` option is used.

- Further improvements of Windows support (Igor Yanchenko, Alexander Kukushkin)

  1. All shell scripts which are used for integration testing are rewritten in Python.
  2. ``pg_ctl kill`` will be used to stop Postgres on non-POSIX systems.
  3. Don't try to use Unix-domain sockets.

**Stability improvements**

- Make sure ``unix_socket_directories`` and ``stats_temp_directory`` exist (Igor)

  Upon the start of Patroni and Postgres, make sure that ``unix_socket_directories`` and ``stats_temp_directory`` exist or try to create them. Patroni will exit if it fails to create them.

- Make sure ``postgresql.pgpass`` is located in a place where Patroni has write access (Igor)

  If Patroni doesn't have write access there, it will exit with an exception.

- Disable Consul ``serfHealth`` check by default (Kostiantyn Nemchenko)

  Even in the case of minor network problems, the failing ``serfHealth`` check leads to invalidation of all sessions associated with the node. As a result, the leader key is lost much earlier than the ``ttl`` expires, which causes unwanted restarts of replicas and possibly demotion of the primary.

- Configure TCP keepalives for connections to the K8s API (Alexander)

  If we get nothing from the socket after TTL seconds, the connection can be considered dead.

- Avoid logging of passwords on user creation (Alexander)

  If the password is rejected, or if logging is configured to be verbose or not configured at all, it might happen that the password is written into the Postgres logs. In order to avoid that, Patroni will change ``log_statement``, ``log_min_duration_statement``, and ``log_min_error_statement`` to some safe values before making the attempt to create or update the user.
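  Conceptually, the sequence of statements resembles the following sketch; the exact safe values Patroni picks are an implementation detail, and the role name and password here are only examples::

      SET log_statement TO 'none';                -- don't log statement texts
      SET log_min_duration_statement TO -1;       -- don't log statement durations
      SET log_min_error_statement TO 'log';      -- don't log statements that fail with ERROR
      ALTER ROLE admin WITH PASSWORD 'secret';    -- the password no longer reaches the logs
      RESET log_statement;
      RESET log_min_duration_statement;
      RESET log_min_error_statement;
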
**Bugfixes**

- Use ``restore_command`` from the ``standby_cluster`` config on cascading replicas (Alexander)

  The standby leader has been doing this since the feature was introduced. Not doing the same on replicas might prevent them from catching up with the standby leader.

- Update timeline reported by the standby cluster (Alexander)

  In the case of a timeline switch, the standby cluster was correctly replicating from the primary, but ``patronictl`` was reporting the old timeline.

- Allow certain recovery parameters to be defined in the custom_conf (Alexander)

  When validating recovery parameters on a replica, Patroni will skip ``archive_cleanup_command``, ``promote_trigger_file``, ``recovery_end_command``, ``recovery_min_apply_delay``, and ``restore_command`` if they are not defined in the Patroni config but in files other than ``postgresql.auto.conf`` or ``postgresql.conf``.

- Improve handling of PostgreSQL parameters with a period in their name (Alexander)

  Such parameters can be defined by extensions, and their values are not necessarily strings. Changing the value might require a restart (for example ``pg_stat_statements.max``).

- Improve exception handling during shutdown (Alexander)

  During shutdown Patroni tries to update its status in the DCS. If the DCS is inaccessible, an exception might be raised. The lack of exception handling was preventing the logger thread from stopping.

Version 1.6.3
-------------

**Bugfixes**

- Don't expose password when running ``pg_rewind`` (Alexander Kukushkin)

  The bug was introduced in `#1301 <https://github.com/zalando/patroni/pull/1301>`__.

- Apply connection parameters specified in the ``postgresql.authentication`` to ``pg_basebackup`` and custom replica creation methods (Alexander)

  They were relying on a URL-like connection string, and therefore the parameters were never applied.

Version 1.6.2
-------------

**New features**

- Implemented ``patroni --version`` (Igor Yanchenko)

  It prints the current version of Patroni and exits.

- Set the ``user-agent`` HTTP header for all HTTP requests (Alexander Kukushkin)

  Patroni communicates with Consul, Etcd, and the Kubernetes API via HTTP. Having a specifically crafted ``user-agent`` (example: ``Patroni/1.6.2 Python/3.6.8 Linux``) might be useful for debugging and monitoring.

- Make it possible to configure the log level for exception tracebacks (Igor)

  If you set ``log.traceback_level=DEBUG``, the tracebacks will be visible only when ``log.level=DEBUG``. The default behavior remains the same.

**Stability improvements**

- Avoid importing all DCS modules when searching for the module required by the config file (Alexander)

  There is no need to import modules for Etcd, Consul, and Kubernetes if we need only, e.g., ZooKeeper. It helps to reduce memory usage and solves the problem of spurious ``Failed to import smth`` INFO messages.

- Removed the Python ``requests`` module from explicit requirements (Alexander)

  It wasn't used for anything critical, but it was causing a lot of problems whenever a new version of ``urllib3`` was released.

- Improve handling of ``etcd.hosts`` written as a comma-separated string instead of a YAML array (Igor)

  Previously it was failing when written in the format ``host1:port1, host2:port2`` (with a space character after the comma).

**Usability improvements**

- Don't force users to choose members from an empty list in ``patronictl`` (Igor)

  If the user provides a wrong cluster name, we will raise an exception rather than ask them to choose a member from an empty list.

- Make the error message more helpful if the REST API cannot bind (Igor)

  For an inexperienced user it might be hard to figure out what is wrong from the Python stacktrace alone.

**Bugfixes**

- Fix calculation of ``wal_buffers`` (Alexander)

  The base unit has been changed from 8 kB blocks to bytes in PostgreSQL 11.

- Use ``passfile`` in ``primary_conninfo`` only on PostgreSQL 10+ (Alexander)

  On older versions there is no guarantee that ``passfile`` will work, unless the latest version of ``libpq`` is installed.

Version 1.6.1
-------------

**New features**

- Added ``PATRONICTL_CONFIG_FILE`` environment variable (msvechla)

  It allows configuring the ``--config-file`` argument for ``patronictl`` from the environment.

- Implement ``patronictl history`` (Alexander Kukushkin)

  It shows the history of failovers/switchovers.

- Pass ``-c statement_timeout=0`` in ``PGOPTIONS`` when doing ``pg_rewind`` (Alexander Kukushkin)

  It protects against the case where ``statement_timeout`` on the server is set to some small value and one of the statements executed by pg_rewind is canceled.

- Allow lower values for PostgreSQL configuration (Soulou)

  Patroni didn't allow some of the PostgreSQL configuration parameters to be set lower than certain hardcoded values. Now the minimum allowed values are smaller; the default values have not been changed.

- Allow for certificate-based authentication (Jonathan S. Katz)

  This feature enables certificate-based authentication for the superuser, replication, and rewind accounts and allows the user to specify the ``sslmode`` they wish to connect with.

- Use the ``passfile`` in the ``primary_conninfo`` instead of the password (Alexander Kukushkin)

  It makes it possible to avoid setting ``600`` permissions on ``postgresql.conf``.

- Perform ``pg_ctl reload`` regardless of config changes (Alexander Kukushkin)

  It is possible that some config files are not controlled by Patroni. When somebody does a reload via the REST API or by sending SIGHUP to the Patroni process, the usual expectation is that Postgres will also be reloaded. Previously this didn't happen when there were no changes in the ``postgresql`` section of the Patroni config.

- Compare all recovery parameters, not only ``primary_conninfo`` (Alexander Kukushkin)

  Previously the ``check_recovery_conf()`` method was only checking whether ``primary_conninfo`` had changed, never taking into account all other recovery parameters.

- Make it possible to apply some recovery parameters without restart (Alexander Kukushkin)

  Starting from PostgreSQL 12 the following recovery parameters can be changed without a restart: ``archive_cleanup_command``, ``promote_trigger_file``, ``recovery_end_command``, and ``recovery_min_apply_delay``. In future Postgres releases this list will be extended, and Patroni will support it automatically.

- Make it possible to change ``use_slots`` online (Alexander Kukushkin)

  Previously it required restarting Patroni and removing the slots manually.

- Remove only ``PATRONI_`` prefixed environment variables when starting up Postgres (Cody Coons)

  It will solve a lot of problems with running different Foreign Data Wrappers.

**Stability improvements**

- Use LIST + WATCH when working with the K8s API (Alexander Kukushkin)

  It allows receiving object changes (pods, endpoints/configmaps) efficiently and puts less stress on the K8s master nodes.

- Improve the workflow when PGDATA is not empty during bootstrap (Alexander Kukushkin)

  According to the ``initdb`` source code, it might consider a PGDATA empty when there are only ``lost+found`` and ``.dotfiles`` in it.
  Now Patroni does the same. If ``PGDATA`` happens to be non-empty, and at the same time not valid from the ``pg_controldata`` point of view, Patroni will complain and exit.

- Avoid calling expensive ``os.listdir()`` on every HA loop (Alexander Kukushkin)

  When the system is under IO stress, ``os.listdir()`` could take a few seconds (or even minutes) to execute, badly affecting the HA loop of Patroni. This could even cause the leader key to disappear from the DCS due to the lack of updates. There is a better and less expensive way to check that the PGDATA is not empty: now we check for the presence of the ``global/pg_control`` file in the PGDATA.

- Some improvements in logging infrastructure (Alexander Kukushkin)

  Previously there was a possibility of losing the last few log lines on shutdown, because the logging thread was a ``daemon`` thread.

- Use ``spawn`` multiprocessing start method on Python 3.4+ (Maciej Kowalczyk)

  It is a known `issue `__ in Python that threading and multiprocessing do not mix well. Switching from the default method ``fork`` to ``spawn`` is a recommended workaround. Not doing so might result in the postmaster starting process hanging and Patroni indefinitely reporting ``INFO: restarting after failure in progress``, while Postgres is actually up and running.

**Improvements in REST API**

- Make it possible to check client certificates in the REST API (Alexander Kukushkin)

  If ``verify_client`` is set to ``required``, Patroni will check client certificates for all REST API calls. When it is set to ``optional``, client certificates are checked for all unsafe REST API endpoints.

- Return the response code 503 for the ``GET /replica`` health check request if Postgres is not running (Alexander Anikin)

  Postgres might spend significant time in recovery before it starts accepting client connections.

- Implement ``/history`` and ``/cluster`` endpoints (Alexander Kukushkin)

  The ``/history`` endpoint shows the content of the ``history`` key in the DCS. The ``/cluster`` endpoint shows all cluster members and some service info, like pending and scheduled restarts or switchovers.

**Improvements in Etcd support**

- Retry on Etcd RAFT internal error (Alexander Kukushkin)

  When the Etcd node is being shut down, it sends ``response code=300, data='etcdserver: server stopped'``, which was causing Patroni to demote the primary.

- Don't give up on Etcd request retry too early (Alexander Kukushkin)

  When there were some network problems, Patroni was quickly exhausting the list of Etcd nodes and giving up without using the whole ``retry_timeout``, potentially resulting in demoting the primary.

**Bugfixes**

- Disable ``synchronous_commit`` when granting execute permissions to the ``pg_rewind`` user (kremius)

  If the bootstrap is done with ``synchronous_mode_strict: true``, the ``GRANT EXECUTE`` statement was waiting indefinitely because no synchronous nodes were available yet.

- Fix memory leak on Python 3.7 (Alexander Kukushkin)

  Patroni is using ``ThreadingMixIn`` to process REST API requests, and Python 3.7 made the threads spawned for every request non-daemon by default.

- Fix race conditions in asynchronous actions (Alexander Kukushkin)

  There was a chance that ``patronictl reinit --force`` could be overridden by the attempt to recover stopped Postgres. This ended up in a situation where Patroni was trying to start Postgres while a basebackup was running.
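  For example, a forced reinitialization of a member, with placeholder cluster and member names::

      patronictl -c /etc/patroni.yml reinit batman postgresql0 --force

  could previously overlap with Patroni's own attempt to recover the stopped Postgres instance.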

- Fix race condition in ``postmaster_start_time()`` method (Alexander Kukushkin)

  If the method is executed from the REST API thread, it requires a separate cursor object to be created.

- Fix the problem of not promoting the sync standby that had a name containing upper case letters (Alexander Kukushkin)

  We converted the name to lower case, because Postgres does the same while comparing the ``application_name`` with the value in ``synchronous_standby_names``.

- Kill all children along with the callback process before starting the new one (Alexander Kukushkin)

  Not doing so makes it hard to implement callbacks in bash and eventually can lead to a situation where two callbacks are running at the same time.

- Fix 'start failed' issue (Alexander Kukushkin)

  Under certain conditions the Postgres state might be set to 'start failed' despite Postgres being up and running.

Version 1.6.0
-------------

This version adds compatibility with PostgreSQL 12, makes it possible to run pg_rewind without superuser on PostgreSQL 11 and newer, and enables IPv6 support.

**New features**

- Psycopg2 was removed from requirements and must be installed independently (Alexander Kukushkin)

  Starting from 2.8.0, ``psycopg2`` was split into two different packages, ``psycopg2`` and ``psycopg2-binary``, which could be installed at the same time into the same place on the filesystem. In order to reduce the dependency hell problem, we let the user choose how to install it. There are a few options available; please consult the :ref:`documentation `.

- Compatibility with PostgreSQL 12 (Alexander Kukushkin)

  Starting from PostgreSQL 12 there is no ``recovery.conf`` anymore, and all former recovery parameters are converted into `GUC `_. In order to protect from ``ALTER SYSTEM SET primary_conninfo`` or similar, Patroni will parse ``postgresql.auto.conf`` and remove all standby and recovery parameters from there. The Patroni config remains backward compatible. For example, despite ``restore_command`` being a GUC, one can still specify it in the ``postgresql.recovery_conf.restore_command`` section, and Patroni will write it into ``postgresql.conf`` for PostgreSQL 12.

- Make it possible to use ``pg_rewind`` without superuser on PostgreSQL 11 and newer (Alexander Kukushkin)

  If you want to use this feature, please define ``username`` and ``password`` in the ``postgresql.authentication.rewind`` section of the Patroni configuration file. For an already existing cluster you will have to create the user manually and ``GRANT EXECUTE`` permission on a few functions. You can find more details in the PostgreSQL `documentation `__.

- Do a smart comparison of actual and desired ``primary_conninfo`` values on replicas (Alexander Kukushkin)

  It might help to avoid a replica restart when you are converting an already existing primary-standby cluster to one managed by Patroni.

- IPv6 support (Alexander Kukushkin)

  There were two major issues: the Patroni REST API service was listening only on ``0.0.0.0``, and IPv6 addresses used in the ``api_url`` and ``conn_url`` were not properly quoted.
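  As a sketch of what now works (the addresses are examples, not defaults), IPv6 addresses can be given in the usual bracketed notation::

      restapi:
        listen: '[2001:db8::10]:8008'           # example address
        connect_address: '[2001:db8::10]:8008'
      postgresql:
        listen: '[2001:db8::10]:5432'
        connect_address: '[2001:db8::10]:5432'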

- Kerberos support (Ajith Vilas, Alexander Kukushkin)

  It makes it possible to use Kerberos authentication between Postgres nodes instead of defining passwords in the Patroni configuration file.

- Manage ``pg_ident.conf`` (Alexander Kukushkin)

  This functionality works similarly to ``pg_hba.conf``: if ``postgresql.pg_ident`` is defined in the config file or DCS, Patroni will write its value to ``pg_ident.conf``. However, if ``postgresql.parameters.ident_file`` is defined, Patroni will assume that ``pg_ident.conf`` is managed from outside and will not update the file.

**Improvements in REST API**

- Added ``/health`` endpoint (Wilfried Roset)

  It will return a successful HTTP status code only if PostgreSQL is running.

- Added ``/read-only`` and ``/read-write`` endpoints (Julien Riou)

  The ``/read-only`` endpoint enables reads balanced across replicas and the primary. The ``/read-write`` endpoint is an alias for ``/primary``, ``/leader`` and ``/master``.

- Use ``SSLContext`` to wrap the REST API socket (Julien Riou)

  Usage of ``ssl.wrap_socket()`` is deprecated and was still allowing soon-to-be-deprecated protocols like TLS 1.1.

**Logging improvements**

- Two-step logging (Alexander Kukushkin)

  All log messages are first written into an in-memory queue and later asynchronously flushed to stderr or a file from a separate thread. The maximum queue size is limited (configurable). If the limit is reached, Patroni will start losing logs, which is still better than blocking the HA loop.

- Enable debug logging for GET/OPTIONS API calls together with latency (Jan Tomsa)

  It will help with debugging of health checks performed by HAProxy, Consul or other tooling that decides which node is the primary/replica.

- Log exceptions caught in Retry (Daniel Kucera)

  Log the final exception when either the number of attempts or the timeout is reached. It will hopefully help to debug some issues when communication to the DCS fails.

**Improvements in patronictl**

- Enhance dialogues for scheduled switchover and restart (Rafia Sabih)

  Previously the dialogues did not take scheduled actions into account and were therefore misleading.

- Check if config file exists (Wilfried Roset)

  Be verbose about the configuration file when the given filename does not exist, instead of ignoring it silently (which can lead to misunderstanding).

- Add fallback value for ``EDITOR`` (Wilfried Roset)

  When the ``EDITOR`` environment variable was not defined, ``patronictl edit-config`` was failing with `PatroniCtlException`. The new strategy is to try ``editor`` and then ``vi``, which should be available on most systems.

**Improvements in Consul support**

- Allow specifying Consul consistency mode (Jan Tomsa)

  You can read more about consistency mode `here `__.

- Reload Consul config on SIGHUP (Cameron Daniel, Alexander Kukushkin)

  It is especially useful when somebody changes the value of ``token``.

**Bugfixes**

- Fix corner case in switchover/failover (Sharoon Thomas)

  The variable ``scheduled_at`` may be undefined if the REST API is not accessible and we are using the DCS as a fallback.
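  For context, a scheduled switchover is normally created via the REST API; the member names and timestamp below are placeholders::

      curl -s -XPOST http://localhost:8008/switchover \
           -d '{"leader": "postgresql0", "candidate": "postgresql1", "scheduled_at": "2020-02-25T12:00+00:00"}'

  The fix covers the case where this information has to be read from the DCS because the REST API of the member is unreachable.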

- Open trust to localhost in ``pg_hba.conf`` during custom bootstrap (Alexander Kukushkin)

  Previously it was open only on the Unix socket, which was causing a lot of errors: ``FATAL: no pg_hba.conf entry for replication connection from host "127.0.0.1", user "replicator"``.

- Consider synchronous node as healthy even when the former leader is ahead (Alexander Kukushkin)

  If the primary loses access to the DCS, it restarts Postgres in read-only mode, but it might happen that other nodes can still access the old primary via the REST API. Such a situation was causing the synchronous standby not to promote, because the old primary was reporting a WAL position ahead of the synchronous standby.

- Standby cluster bugfixes (Alexander Kukushkin)

  Make it possible to bootstrap a replica in a standby cluster when the standby_leader is not accessible, and a few other minor fixes.

Version 1.5.6
-------------

**New features**

- Support work with an etcd cluster via a set of proxies (Alexander Kukushkin)

  It might happen that the etcd cluster is not accessible directly but only via a set of proxies. In this case Patroni will not perform etcd topology discovery but will simply round-robin through the proxy hosts. The behavior is controlled by `etcd.use_proxies`.

- Changed callbacks behavior when the role on the node is changed (Alexander)

  If the role was changed from `master` or `standby_leader` to `replica`, or from `replica` to `standby_leader`, the `on_restart` callback will not be called anymore in favor of the `on_role_change` callback.

- Change the way we start postgres (Alexander)

  Use `multiprocessing.Process` instead of executing itself, and `multiprocessing.Pipe` to transmit the postmaster pid to the Patroni process. Before that we were using pipes, which was leaving the postmaster process with stdin closed.

**Bug fixes**

- Fix role returned by REST API for the standby leader (Alexander)

  It was incorrectly returning `replica` instead of `standby_leader`.

- Wait for callback end if it could not be killed (Julien Tachoires)

  Patroni doesn't have enough privileges to terminate a callback script running under `sudo`, which was cancelling the new callback. If the running script cannot be killed, Patroni will wait until it finishes and then run the next callback.

- Reduce lock time taken by the dcs.get_cluster method (Alexander)

  Due to the lock being held, DCS slowness was affecting the REST API health checks, causing false positives.

- Improve cleaning of PGDATA when `pg_wal`/`pg_xlog` is a symlink (Julien)

  In this case Patroni will explicitly remove the files from the target directory.

- Remove unnecessary usage of os.path.relpath (Ants Aasma)

  It depends on being able to resolve the working directory, which will fail if Patroni is started in a directory that is later unlinked from the filesystem.

- Do not enforce SSL version when communicating with Etcd (Alexander)

  For some unknown reason python3-etcd on Debian and Ubuntu is not based on the latest version of the package and therefore enforces TLSv1, which is not supported by Etcd v3. We solved this problem on the Patroni side.

Version 1.5.5
-------------

This version introduces the possibility of automatic reinit of the former master, improves the patronictl list output and fixes a number of bugs.

**New features**

- Add support of `PATRONI_ETCD_PROTOCOL`, `PATRONI_ETCD_USERNAME` and `PATRONI_ETCD_PASSWORD` environment variables (Étienne M)

  Before, it was possible to configure them only in the config file or as a part of `PATRONI_ETCD_URL`, which is not always convenient.
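  For example, instead of packing the credentials into the URL::

      export PATRONI_ETCD_URL=https://user:password@127.0.0.1:2379

  one can now export them separately (the values are placeholders)::

      # placeholder credentials and address
      export PATRONI_ETCD_PROTOCOL=https
      export PATRONI_ETCD_USERNAME=user
      export PATRONI_ETCD_PASSWORD=password
      export PATRONI_ETCD_HOST=127.0.0.1:2379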

- Make it possible to automatically reinit the former master (Alexander Kukushkin)

  If pg_rewind is disabled or can't be used, the former master could fail to start as a new replica due to diverged timelines. In this case, the only way to fix it is wiping the data directory and reinitializing. This behavior can be changed by setting `postgresql.remove_data_directory_on_diverged_timelines`. When it is set, Patroni will wipe the data directory and reinitialize the former master automatically.

- Show information about timelines in patronictl list (Alexander)

  It helps to detect stale replicas. In addition to that, `Host` will include ':{port}' if the port value isn't the default or there is more than one member running on the same host.

- Create a headless service associated with the $SCOPE-config endpoint (Alexander)

  The "config" endpoint keeps information about the cluster-wide Patroni and Postgres configuration and the history file, and, last but most important, it holds the `initialize` key. When the Kubernetes master node is restarted or upgraded, it removes endpoints without services. The headless service will prevent the endpoint from being removed.

**Bug fixes**

- Adjust the read timeout for the leader watch blocking query (Alexander)

  According to the Consul documentation, the actual response timeout is increased by a small random amount of additional wait time added to the supplied maximum wait time to spread out the wake up time of any concurrent requests. It adds up to `wait / 16` additional time to the maximum duration. In our case we are adding `wait / 15` or 1 second, whichever is bigger.

- Always use replication=1 when connecting via the replication protocol to postgres (Alexander)

  Starting from Postgres 10, a line in pg_hba.conf with database=replication doesn't accept connections with the parameter replication=database.

- Don't write primary_conninfo into recovery.conf for a WAL-only standby cluster (Alexander)

  Despite having neither `host` nor `port` defined in the `standby_cluster` config, Patroni was putting `primary_conninfo` into `recovery.conf`, which is useless and was generating a lot of errors.

Version 1.5.4
-------------

This version implements flexible logging and fixes a number of bugs.

**New features**

- Improvements in logging infrastructure (Alexander Kukushkin, Lucas Capistrant, Alexander Anikin)

  Logging can now be configured not only from environment variables but also from the Patroni config file. That makes it possible to change the logging configuration at runtime by updating the config and doing a reload, or by sending SIGHUP to the Patroni process. By default Patroni writes logs to stderr, but now it becomes possible to write logs directly into a file and rotate it when it reaches a certain size. In addition to that, support was added for a custom date format and for fine-tuning the log level for each Python module.

- Make it possible to take into account the current timeline during leader elections (Alexander Kukushkin)

  It could happen that a node considers itself the healthiest one although it is currently not on the latest known timeline. In some cases we want to avoid promoting such a node, which can be achieved by setting the `check_timeline` parameter to `true` (the default behavior remains unchanged).

- Relaxed requirements on superuser credentials

  Libpq allows opening connections without explicitly specifying either username or password. Depending on the situation, it relies either on the pgpass file or on the trust authentication method in pg_hba.conf.
  Since pg_rewind also uses libpq, it works the same way.

- Implement the possibility to configure Consul Service registration and check interval via environment variables (Alexander Kukushkin)

  Registration of a service in Consul was added in 1.5.0, but so far it was only possible to turn it on via patroni.yaml.

**Stability Improvements**

- Set archive_mode to off during the custom bootstrap (Alexander Kukushkin)

  We want to avoid archiving WALs and history files until the cluster is fully functional. It really helps if the custom bootstrap involves pg_upgrade.

- Apply a five-second backoff when loading the global config on start (Alexander Kukushkin)

  It helps to avoid hammering the DCS when Patroni is just starting up.

- Reduce the number of error messages generated on shutdown (Alexander Kukushkin)

  They were harmless but rather annoying and sometimes scary.

- Explicitly secure rw perms for recovery.conf at creation time (Lucas)

  We don't want anybody except the patroni/postgres user reading this file, because it contains the replication user and password.

- Redirect HTTPServer exceptions to the logger (Julien Riou)

  By default, such exceptions were logged on standard output, messing with regular logs.

**Bug fixes**

- Removed the stderr pipe to stdout on the pg_ctl process (Cody Coons)

  Inheriting stderr from the main Patroni process allows all Postgres logs to be seen along with all Patroni logs. This is very useful in a container environment, as Patroni and Postgres logs may be consumed using standard tools (docker logs, kubectl, etc.). In addition to that, this change fixes a bug with Patroni not being able to catch the postmaster pid when postgres writes warnings to stderr.

- Set the Consul service check deregister timeout in Go time format (Pavel Kirillov)

  Without an explicitly mentioned time unit, registration was failing.

- Relax checks of the standby_cluster configuration (Dmitry Dolgov, Alexander Kukushkin)

  It was accepting only strings as valid values, and therefore it was not possible to specify the port as an integer or create_replica_methods as a list.

Version 1.5.3
-------------

Compatibility and bugfix release.

- Improve stability when running with python3 against zookeeper (Alexander Kukushkin)

  A change of `loop_wait` was causing Patroni to disconnect from zookeeper and never reconnect.

- Fix broken compatibility with postgres 9.3 (Alexander)

  When opening a replication connection we should specify replication=1, because 9.3 does not understand replication='database'.

- Make sure we refresh the Consul session at least once per HA loop and improve handling of Consul session exceptions (Alexander)

  A restart of the local Consul agent invalidates all sessions related to the node. Not refreshing the session on time and not handling session errors properly was causing the primary to demote.

Version 1.5.2
-------------

Compatibility and bugfix release.

- Compatibility with kazoo-2.6.0 (Alexander Kukushkin)

  In order to make sure that requests are performed with an appropriate timeout, Patroni redefines the create_connection method from the python-kazoo module. The latest release of kazoo slightly changed the way the create_connection method is called.

- Fix Patroni crash when the Consul cluster loses its leader (Alexander)

  The crash was happening due to an incorrect implementation of the touch_member method; it should return a boolean and not raise any exceptions.

Version 1.5.1
-------------

This version implements support of permanent replication slots, adds support for pgBackRest and fixes a number of bugs.
**New features**

- Permanent replication slots (Alexander Kukushkin)

  Permanent replication slots are preserved on failover/switchover, that is, Patroni on the new primary will create the configured replication slots right after doing a promote. Slots can be configured with the help of `patronictl edit-config`. The initial configuration can also be done in the :ref:`bootstrap.dcs `.

- Add pgbackrest support (Yogesh Sharma)

  pgBackRest can restore into an existing $PGDATA folder. This allows a speedy restore, as files which have not changed since the last backup are skipped; to support this feature, the new parameter `keep_data` has been introduced. See the :ref:`replica creation method ` section for additional examples.

**Bug fixes**

- A few bugfixes in the "standby cluster" workflow (Alexander)

  Please see https://github.com/zalando/patroni/pull/823 for more details.

- Fix the REST API health check when cluster management is paused and the DCS is not accessible (Alexander)

  The regression was introduced in https://github.com/zalando/patroni/commit/90cf930036a9d5249265af15d2b787ec7517cf57

Version 1.5.0
-------------

This version enables a Patroni HA cluster to operate in a standby mode, introduces experimental support for running on Windows, and provides a new configuration parameter to register the PostgreSQL service in Consul.

**New features**

- Standby cluster (Dmitry Dolgov)

  One or more Patroni nodes can form a standby cluster that runs alongside the primary one (e.g. in another datacenter) and consists of standby nodes that replicate from the master in the primary cluster. All PostgreSQL nodes in the standby cluster are replicas; one of those replicas elects itself to replicate directly from the remote master, while the others replicate from it in a cascading manner. A more detailed description of this feature and some configuration examples can be found :ref:`here ` (see also the configuration sketch at the end of this version's notes).

- Register Services in Consul (Pavel Kirillov, Alexander Kukushkin)

  If the `register_service` parameter in the consul :ref:`configuration ` is enabled, the node will register a service with the name `scope` and the tag `master`, `replica` or `standby-leader`.

- Experimental Windows support (Pavel Golub)

  From now on it is possible to run Patroni on Windows, although Windows support is brand-new and hasn't received as much real-world testing as its Linux counterpart. We welcome your feedback!

**Improvements in patronictl**

- Add the patronictl -k/--insecure flag and support for a restapi cert (Wilfried Roset)

  In the past, if the REST API was protected by self-signed certificates, `patronictl` would fail to verify them, and there was no way to disable that verification. It is now possible to configure `patronictl` to skip the certificate verification altogether, or to provide CA and client certificates in the :ref:`ctl: ` section of the configuration.

- Exclude members with the nofailover tag from patronictl switchover/failover output (Alexander Anikin)

  Previously, those members were incorrectly proposed as candidates when performing an interactive switchover or failover via patronictl.

**Stability improvements**

- Avoid parsing non-key-value output lines of pg_controldata (Alexander Anikin)

  Under certain circumstances pg_controldata outputs lines without a colon character. That would trigger an error in the Patroni code that parsed pg_controldata output, hiding the actual problem; often such lines are emitted as a warning shown by pg_controldata before the regular output, e.g. when the binary major version does not match that of the PostgreSQL data directory.
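For illustration of the standby cluster feature described above, a minimal, hedged sketch of the ``standby_cluster`` block in the dynamic (DCS) configuration; the host and port are placeholders for the remote master of the primary cluster:

.. code:: YAML

    standby_cluster:
      host: 10.0.0.1  # address of the remote master to replicate from
      port: 5432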
- Add the member name to the error message during the leader election (Jan Mussler)

  During the leader election, Patroni connects to all known members of the cluster and requests their status. Such status is written to the Patroni log and includes the name of the member. Previously, if the member was not accessible, the error message did not indicate its name and contained only the URL.

- Immediately reserve the WAL position upon creation of the replication slot (Alexander Kukushkin)

  Starting from 9.6, the `pg_create_physical_replication_slot` function provides an additional boolean parameter `immediately_reserve`. When it is set to `false`, which is also the default, the slot doesn't reserve the WAL position until it receives the first client connection, potentially losing some segments required by the client in the time window between the slot creation and the initial client connection.

- Fix a bug in strict synchronous replication (Alexander Kukushkin)

  When running with `synchronous_mode_strict: true`, in some cases Patroni puts `*` into `synchronous_standby_names`, changing the sync state of most replication connections to `potential`. Previously, Patroni couldn't pick a synchronous candidate under such circumstances, as it only considered those with the state `async`.

Version 1.4.6
-------------

**Bug fixes and stability improvements**

This release fixes a critical issue with the Patroni API /master endpoint returning 200 for a non-master node. This was a reporting issue with no actual split-brain, but under certain circumstances clients might have been directed to the read-only node.

- Reset is_leader status on demote (Alexander Kukushkin, Oleksii Kliukin)

  Make sure a demoted cluster member stops responding with code 200 on the /master API call.

- Add a new "cluster_unlocked" field to the API output (Dmitry Dolgov)

  This field indicates whether the cluster has the master running. It can be used when it is not possible to query any node but one of the replicas.

Version 1.4.5
-------------

**New features**

- Improve logging when applying new postgres configuration (Don Seiler)

  Patroni logs changed parameter names and values.

- Python 3.7 compatibility (Christoph Berg)

  async is a reserved keyword in python3.7.

- Set state to "stopped" in the DCS when a member is shut down (Tony Sorrentino)

  This shows the member state as "stopped" in the "patronictl list" output.

- Improve the message logged when a stale postmaster.pid matches a running process (Ants Aasma)

  The previous one was beyond confusing.

- Implement patronictl reload functionality (Don Seiler)

  Before that, it was only possible to reload configuration by either calling the REST API or by sending a SIGHUP signal to the Patroni process.

- Take and apply some parameters from controldata when starting as a replica (Alexander Kukushkin)

  The value of `max_connections` and some other parameters set in the global configuration may be lower than the ones actually used by the primary; when this happens, the replica cannot start and should be fixed manually. Patroni takes care of that now by reading and applying the values from `pg_controldata`, starting postgres and setting the `pending_restart` flag.

- If set, use LD_LIBRARY_PATH when starting postgres (Chris Fraser)

  When starting up Postgres, Patroni was passing along the PATH, LC_ALL and LANG env vars if they were set. Now it does the same with LD_LIBRARY_PATH. It should help when PostgreSQL is installed in a non-standard location.
- Rename create_replica_method to create_replica_methods (Dmitry Dolgov)

  To make it clear that it's actually an array. The old name is still supported for backward compatibility.

**Bug fixes and stability improvements**

- Fix the condition for the replica start due to pg_rewind in paused state (Oleksii Kliukin)

  Avoid starting a replica that had already executed pg_rewind before.

- Respond 200 to the master health-check only if update_lock has been successful (Alexander)

  Prevent Patroni from reporting itself as master on the former (demoted) master if the DCS is partitioned.

- Fix compatibility with the new consul module (Alexander)

  Starting from v1.1.0, python-consul changed its internal API and started using `list` instead of `dict` to pass query parameters.

- Catch exceptions from the Patroni REST API thread during shutdown (Alexander)

  Those uncaught exceptions kept PostgreSQL running at shutdown.

- Do crash recovery only when Postgres runs as the master (Alexander)

  Require `pg_controldata` to report 'in production', 'shutting down' or 'in crash recovery'. In all other cases no crash recovery is necessary.

- Improve handling of configuration errors (Henning Jacobs, Alexander)

  It is possible to change a lot of parameters at runtime (including `restapi.listen`) by updating the Patroni config file and sending SIGHUP to the Patroni process. This fix eliminates obscure exceptions from the 'restapi' thread when some of the parameters receive invalid values.

Version 1.4.4
-------------

**Stability improvements**

- Fix a race condition in poll_failover_result (Alexander Kukushkin)

  It didn't directly affect either failover or switchover, but in some rare cases it was reporting success too early, when the former leader released the lock, producing a 'Failed over to "None"' instead of a 'Failed over to "desired-node"' message.

- Treat Postgres parameter names as case insensitive (Alexander)

  Most of the Postgres parameters have snake_case names, but there are three exceptions to this rule: DateStyle, IntervalStyle and TimeZone. Postgres accepts those parameters when written in a different case (e.g. timezone = 'some/tzn'); however, Patroni was unable to find case-insensitive matches of those parameter names in pg_settings and ignored such parameters as a result (see the configuration sketch below).

- Abort start if attaching to running postgres and the cluster is not initialized (Alexander)

  Patroni can attach itself to an already running Postgres instance. It is imperative to start running Patroni on the master node before getting to the replicas.

- Fix the behavior of patronictl scaffold (Alexander)

  Pass a dict object to touch_member instead of a JSON-encoded string; the DCS implementation will take care of encoding it.

- Don't demote the master if it failed to update the leader key in pause (Alexander)

  During maintenance a DCS may start failing write requests while continuing to respond to read ones. In that case, Patroni used to put the Postgres master node into read-only mode after failing to update the leader lock in DCS.

- Sync replication slots when Patroni notices a new postmaster process (Alexander)

  If Postgres has been restarted, Patroni has to make sure that the list of replication slots matches its expectations.

- Verify sysid and sync replication slots after coming out of pause (Alexander)

  During the `maintenance` mode it may happen that the data directory was completely rewritten, and therefore we have to make sure that the `Database system identifier` still belongs to our cluster and that the replication slots are in sync with Patroni's expectations.
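As a small, hedged illustration of the case-insensitive parameter handling mentioned above, the relevant part of the configuration might look as follows (the values are placeholders):

.. code:: YAML

    postgresql:
      parameters:
        TimeZone: Europe/Berlin  # matched case-insensitively against pg_settings
        DateStyle: 'ISO, DMY'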
- Fix a possible failure to start Postgres that is not running when a postmaster lock file is present in the data directory (Alexander)

  Detect reuse of the PID from the postmaster lock file. You are more likely to hit this problem if you run Patroni and Postgres in a docker container.

- Improve protection against the DCS being accidentally wiped (Alexander)

  Patroni has a lot of logic in place to prevent failover in such a case; it can also restore all keys back. However, until this change an accidental removal of the /config key was switching off pause mode for one cycle of the HA loop.

- Do not exit when encountering an invalid system ID (Oleksii Kliukin)

  Do not exit when the cluster system ID is empty or doesn't pass the validation check. In that case, the cluster most likely needs a reinit; mention it in the result message. Avoid terminating Patroni, as otherwise reinit cannot happen.

**Compatibility with Kubernetes 1.10+**

- Added a check for empty subsets (Cody Coons)

  Kubernetes 1.10.0+ started returning `Endpoints.subsets` set to `None` instead of `[]`.

**Bootstrap improvements**

- Make deleting recovery.conf optional (Brad Nicholson)

  If `bootstrap.<custom_bootstrap_method_name>.keep_existing_recovery_conf` is defined and set to ``True``, Patroni will not remove the existing ``recovery.conf`` file. This is useful when bootstrapping from a backup with tools like pgBackRest that generate the appropriate `recovery.conf` for you.

- Allow options to the basebackup built-in method (Oleksii)

  It is now possible to supply options to the built-in basebackup method by defining the `basebackup` section in the configuration, similar to how those are defined for custom replica creation methods. The difference is in the format accepted by the `basebackup` section: since pg_basebackup accepts both `--key=value` and `--key` options, the contents of the section can be either a dictionary of key-value pairs, or a list of either one-element dictionaries or just keys (for the options that don't accept values). See the :ref:`replica creation method ` section for additional examples.

Version 1.4.3
-------------

**Improvements in logging**

- Make the log level configurable from environment variables (Andy Newton, Keyvan Hedayati)

  `PATRONI_LOGLEVEL` - sets the general logging level

  `PATRONI_REQUESTS_LOGLEVEL` - sets the logging level for all HTTP requests, e.g. Kubernetes API calls

  See `the docs for Python logging ` to get the names of possible log levels

**Stability improvements and bug fixes**

- Don't rediscover the etcd cluster topology when the watch timed out (Alexander Kukushkin)

  If there is only one host in the etcd configuration and exactly this host is not accessible, Patroni was starting discovery of the cluster topology and never succeeding. Instead, it should just switch to the next available node.

- Write the content of bootstrap.pg_hba into pg_hba.conf after a custom bootstrap (Alexander)

  Now it behaves similarly to the usual bootstrap with `initdb`.

- Single user mode was waiting for user input and never finishing (Alexander)

  The regression was introduced in https://github.com/zalando/patroni/pull/576

Version 1.4.2
-------------

**Improvements in patronictl**

- Rename scheduled failover to scheduled switchover (Alexander Kukushkin)

  Failover and switchover functions were separated in version 1.4, but `patronictl list` was still reporting `Scheduled failover` instead of `Scheduled switchover`.

- Show information about pending restarts (Alexander)

  In order to apply some configuration changes, it is sometimes necessary to restart postgres.
  Patroni was already giving a hint about that in the REST API and when writing the node status into DCS, but there was no easy way to display it.

- Make show-config work with cluster_name from the config file (Alexander)

  It works similarly to `patronictl edit-config`.

**Stability improvements**

- Avoid calling pg_controldata during bootstrap (Alexander)

  During initdb or a custom bootstrap there is a time window when pgdata is not empty but pg_controldata has not been written yet. In such a case the pg_controldata call was failing with error messages.

- Handle exceptions raised from psutil (Alexander)

  cmdline is read and parsed every time the `cmdline()` method is called. It could happen that the process being examined has already disappeared; in that case `NoSuchProcess` is raised.

**Kubernetes support improvements**

- Don't swallow errors from the k8s API (Alexander)

  A call to the Kubernetes API can fail for a number of different reasons. In some cases such a call should be retried; in some other cases we should log the error message and the exception stack trace. The change here will help debug Kubernetes permission issues.

- Update the Kubernetes example Dockerfile to install Patroni from the master branch (Maciej Szulik)

  Before that it was using `feature/k8s`, which became outdated.

- Add proper RBAC to run Patroni on k8s (Maciej)

  Add the Service account that is assigned to the pods of the cluster, the Role that holds only the necessary permissions, and the RoleBinding that connects the Service account and the Role.

Version 1.4.1
-------------

**Fixes in patronictl**

- Don't show the current leader in the suggested list of members to fail over to (Alexander Kukushkin)

  patronictl failover can still be used when there is a leader in the cluster, but the leader is now excluded from the list of members it is possible to fail over to.

- Make patronictl switchover compatible with the old Patroni API (Alexander)

  If the POST /switchover REST API call fails with status code 501, it will repeat the request against the /failover endpoint.

Version 1.4
-----------

This version adds support for using Kubernetes as a DCS, making it possible to run Patroni as a cloud-native agent in Kubernetes without any additional deployments of Etcd, Zookeeper or Consul.

**Upgrade notice**

Installing Patroni via pip will no longer bring in optional dependencies (such as libraries for Etcd, Zookeeper, Consul or Kubernetes, or support for AWS). In order to enable them, one needs to list them explicitly in the pip install command, for instance `pip install patroni[etcd,kubernetes]`.

**Kubernetes support**

Implement a Kubernetes-based DCS. The Endpoints metadata is used in order to store the configuration and the leader key. The metadata field inside the pod definition is used to store the member-related data. In addition to using Endpoints, Patroni supports ConfigMaps. You can find more information about this feature in the :ref:`Kubernetes chapter of the documentation `.

**Stability improvements**

- Factor out the postmaster process into a separate object (Ants Aasma)

  This object identifies a running postmaster process via its pid and start time, and simplifies detection (and resolution) of situations when the postmaster was restarted behind our back or when the postgres directory disappeared from the file system.

- Minimize the number of SELECTs issued by Patroni on every cycle of the HA loop (Alexander Kukushkin)

  On every iteration of the HA loop, Patroni needs to know the recovery status and the absolute WAL position.
  From now on Patroni will run only a single SELECT to get this information, instead of two on the replica and three on the master.

- Remove the leader key on shutdown only when we hold the lock (Ants)

  Unconditional removal was generating unnecessary and misleading exceptions.

**Improvements in patronictl**

- Add a version command to patronictl (Ants)

  It shows the version of the installed Patroni and the versions of the running Patroni instances (if the cluster name is specified).

- Make the cluster_name argument optional for some patronictl commands (Alexander, Ants)

  It works if patronictl is using the usual Patroni configuration file with ``scope`` defined.

- Show information about scheduled switchovers and maintenance mode (Alexander)

  Before that it was possible to get this information only from the Patroni logs or directly from the DCS.

- Improve ``patronictl reinit`` (Alexander)

  Sometimes ``patronictl reinit`` refused to proceed when Patroni was busy with other actions, namely trying to start postgres. `patronictl` didn't provide any commands to cancel such long-running actions, and the only (dangerous) workaround was removing the data directory manually. The new implementation of `reinit` forcefully cancels other long-running actions before proceeding with reinit.

- Implement the ``--wait`` flag in ``patronictl pause`` and ``patronictl resume`` (Alexander)

  It makes ``patronictl`` wait until the requested action is acknowledged by all nodes in the cluster. Such behavior is achieved by exposing the ``pause`` flag for every node in the DCS and via the REST API.

- Rename ``patronictl failover`` into ``patronictl switchover`` (Alexander)

  The previous ``failover`` was actually only capable of doing a switchover; it refused to proceed in a cluster without a leader.

- Alter the behavior of ``patronictl failover`` (Alexander)

  It works even if there is no leader, but in that case you have to explicitly specify the node which should become the new leader.

**Expose information about timeline and history**

- Expose the current timeline in DCS and via the API (Alexander)

  Store information about the current timeline for each member of the cluster. This information is accessible via the API and is stored in the DCS.

- Store the promotion history in the /history key in DCS (Alexander)

  In addition, store the timeline history enriched with the timestamp of the corresponding promotion in the /history key in DCS and update it with each promote.

**Add endpoints for getting synchronous and asynchronous replicas**

- Add new /sync and /async endpoints (Alexander, Oleksii Kliukin)

  These endpoints (also accessible as /synchronous and /asynchronous) return 200 only for synchronous and asynchronous replicas respectively (excluding those marked as `noloadbalance`).

**Allow multiple hosts for Etcd**

- Add a new `hosts` parameter to the Etcd configuration (Alexander)

  This parameter should contain the initial list of hosts that will be used to discover and populate the list of the running etcd cluster members. If at some point during operation this list of discovered hosts is exhausted (no available hosts from that list), Patroni will return to the initial list from the `hosts` parameter.

Version 1.3.6
-------------

**Stability improvements**

- Verify the process start time when checking if postgres is running (Ants Aasma)

  After a crash that doesn't clean up postmaster.pid, there could be a new process with the same pid, resulting in a false positive for is_running(), which would lead to all kinds of bad behavior.
- Shut down postgresql before bootstrap when we lost the data directory (ainlolcat)

  When the data directory on the master is forcefully removed, the postgres process can still stay alive for some time and prevent the replica created in place of the former master from starting or replicating. The fix makes Patroni cache the postmaster pid and its start time, and lets it terminate the old postmaster in case it is still running after the corresponding data directory has been removed.

- Perform crash recovery in single user mode if the postgres master dies (Alexander Kukushkin)

  It is unsafe to start immediately as a standby, and it is not possible to run ``pg_rewind`` if postgres hasn't been shut down cleanly. The single user crash recovery only kicks in if ``pg_rewind`` is enabled or there is no master at the moment.

**Consul improvements**

- Make it possible to provide a datacenter configuration for Consul (Vilius Okockis, Alexander)

  Before that, Patroni was always communicating with the datacenter of the host it runs on.

- Always send a token in the X-Consul-Token http header (Alexander)

  If ``consul.token`` is defined in the Patroni configuration, we will always send it in the 'X-Consul-Token' http header. The python-consul module tries to be "consistent" with the Consul REST API, which doesn't accept the token as a query parameter for the `session API `__, but it still works with the 'X-Consul-Token' header.

- Adjust the session TTL if the supplied value is smaller than the minimum possible (Stas Fomin, Alexander)

  It could happen that the TTL provided in the Patroni configuration is smaller than the minimum one supported by Consul. In that case, the Consul agent fails to create a new session. Without a session Patroni cannot create member and leader keys in the Consul KV store, resulting in an unhealthy cluster.

**Other improvements**

- Define a custom log format via the environment variable ``PATRONI_LOGFORMAT`` (Stas)

  Allow disabling timestamps and other similar fields in Patroni logs if they are already added by the system logger (usually when Patroni runs as a service).

Version 1.3.5
-------------

**Bugfix**

- Set role to 'uninitialized' if the data directory was removed (Alexander Kukushkin)

  If the node was running as a master, this was preventing failover.

**Stability improvement**

- Try to run the postmaster in single-user mode if we tried and failed to start postgres (Alexander)

  Usually such a problem happens when the node running as a master was terminated and the timelines have diverged. If ``recovery.conf`` has ``restore_command`` defined, chances are high that postgres will abort startup and leave the controldata unchanged. That makes it impossible to use ``pg_rewind``, which requires a clean shutdown.

**Consul improvements**

- Make it possible to specify health checks when creating a session (Alexander)

  If not specified, Consul will use "serfHealth". On the one hand this allows fast detection of an isolated master, but on the other hand it makes it impossible for Patroni to tolerate short network lags.

**Bugfix**

- Fix watchdog on Python 3 (Ants Aasma)

  A misunderstanding of the ioctl() call interface: if mutable=False, fcntl.ioctl() actually returns the arg buffer back. This accidentally worked on Python 2 because int and str comparison did not return an error. Error reporting is actually done by raising IOError on Python 2 and OSError on Python 3.

Version 1.3.4
-------------

**Different Consul improvements**

- Pass the consul token as a header (Andrew Colin Kissa)

  Headers are now the preferred way to pass the token to the consul `API `__.
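Pulling these Consul options together, a hedged sketch of what a ``consul`` section of the Patroni configuration might look like; the host, datacenter and token values are placeholders:

.. code:: YAML

    consul:
      host: 127.0.0.1:8500
      dc: dc1             # talk to this datacenter instead of the local one
      token: <acl-token>  # sent in the X-Consul-Token header
      checks: []          # empty list: don't tie the session to serfHealth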
- Advanced configuration for Consul (Alexander Kukushkin)

  Possibility to specify ``scheme``, ``token``, client and CA certificates; :ref:`details `.

- Compatibility with python-consul-0.7.1 and above (Alexander)

  The new python-consul module has changed the signature of some methods.

- The "Could not take out TTL lock" message was never logged (Alexander)

  Not a critical bug, but the lack of proper logging complicates investigation in case of problems.

**Quote synchronous_standby_names using quote_ident**

- When writing ``synchronous_standby_names`` into ``postgresql.conf``, its value must be quoted (Alexander)

  If it is not quoted properly, PostgreSQL will effectively disable synchronous replication and continue to work.

**Different bugfixes around pause state, mostly related to watchdog** (Alexander)

- Do not send keepalives if the watchdog is not active
- Avoid activating the watchdog in pause mode
- Set the correct postgres state in pause mode
- Do not try to run queries from the API if postgres is stopped

Version 1.3.3
-------------

**Bugfixes**

- Synchronous replication was disabled shortly after promotion even when synchronous_mode_strict was turned on (Alexander Kukushkin)
- Create an empty ``pg_ident.conf`` file if it is missing after restoring from a backup (Alexander)
- Open access in ``pg_hba.conf`` to all databases, not only postgres (Franco Bellagamba)

Version 1.3.2
-------------

**Bugfix**

- patronictl edit-config didn't work with ZooKeeper (Alexander Kukushkin)

Version 1.3.1
-------------

**Bugfix**

- Failover via the API was broken due to a change in ``_MemberStatus`` (Alexander Kukushkin)

Version 1.3
-----------

Version 1.3 adds the custom bootstrap possibility, significantly improves support for pg_rewind, enhances the synchronous mode support, adds configuration editing to patronictl and implements watchdog support on Linux. In addition, this is the first version to work correctly with PostgreSQL 10.

**Upgrade notice**

There are no known compatibility issues with the new version of Patroni. Configuration from version 1.2 should work without any changes. It is possible to upgrade by installing the new packages and either restarting Patroni (which will cause a PostgreSQL restart), or by putting Patroni into :ref:`pause mode ` first and then restarting Patroni on all nodes in the cluster (Patroni in pause mode will not attempt to stop/start PostgreSQL), resuming from pause mode at the end.

**Custom bootstrap**

- Make the process of bootstrapping the cluster configurable (Alexander Kukushkin)

  Allow custom bootstrap scripts instead of ``initdb`` when initializing the very first node in the cluster. The bootstrap command receives the name of the cluster and the path to the data directory. The resulting cluster can be configured to perform recovery, making it possible to bootstrap from a backup and do point-in-time recovery. Refer to the :ref:`documentation page ` for a more detailed description of this feature.

**Smarter pg_rewind support**

- Decide on whether to run pg_rewind by looking at the timeline differences from the current master (Alexander)

  Previously, Patroni had a fixed set of conditions to trigger pg_rewind, namely when starting a former master, when doing a switchover to the designated node for every other node in the cluster, or when there is a replica with the nofailover tag. All those cases have in common a chance that some replica may be ahead of the new master. In some cases pg_rewind did nothing; in some other ones it was not run when necessary.
  Instead of relying on this limited list of rules, make Patroni compare the master and the replica WAL positions (using the streaming replication protocol) in order to reliably decide if a rewind is necessary for the replica.

**Synchronous replication mode strict**

- Enhance synchronous replication support by adding the strict mode (James Sewell, Alexander)

  Normally, when ``synchronous_mode`` is enabled and there are no replicas attached to the master, Patroni will disable synchronous replication in order to keep the master available for writes. The ``synchronous_mode_strict`` option changes that: when it is set, Patroni will not disable synchronous replication in the absence of replicas, effectively blocking all clients writing data to the master. In addition to the synchronous mode guarantee of preventing any data loss due to automatic failover, the strict mode ensures that each write is either durably stored on two nodes or not happening at all if there is only one node in the cluster.

**Configuration editing with patronictl**

- Add configuration editing to patronictl (Ants Aasma, Alexander)

  Add to patronictl the ability to edit the dynamic cluster configuration stored in DCS. Support specifying the parameters/values on the command line, invoking the $EDITOR, or applying configuration from a YAML file.

**Linux watchdog support**

- Implement watchdog support for Linux (Ants)

  Support the Linux software watchdog in order to reboot a node where Patroni is not running or not responding (e.g. because of high load). It is possible to configure the watchdog device to use (`/dev/watchdog` by default) and the mode (on, automatic, off) from the watchdog section of the Patroni configuration. You can get more information from the :ref:`watchdog documentation `.

**Add support for PostgreSQL 10**

- Patroni is compatible with all beta versions of PostgreSQL 10 released so far, and we expect it to be compatible with PostgreSQL 10 when it is released.

**PostgreSQL-related minor improvements**

- Define pg_hba.conf via the Patroni configuration file or the dynamic configuration in DCS (Alexander)

  Allow defining the contents of ``pg_hba.conf`` in the ``pg_hba`` sub-section of the ``postgresql`` section of the configuration. This simplifies managing ``pg_hba.conf`` on multiple nodes, as one needs to define it only once in DCS instead of logging in to every node, changing it manually and reloading the configuration. When defined, the contents of this section will replace the current ``pg_hba.conf`` completely. Patroni ignores it if the ``hba_file`` PostgreSQL parameter is set.

- Support connecting via a UNIX socket to the local PostgreSQL cluster (Alexander)

  Add the ``use_unix_socket`` option to the ``postgresql`` section of the Patroni configuration. When it is set to true and the PostgreSQL ``unix_socket_directories`` option is not empty, Patroni uses the first value from it to connect to the local PostgreSQL cluster. If ``unix_socket_directories`` is not defined, Patroni will assume its default value and omit the ``host`` parameter in the PostgreSQL connection string altogether.

- Support change of superuser and replication credentials on reload (Alexander)

- Support storing configuration files outside of the PostgreSQL data directory (@jouir)

  Add the new ``postgresql`` configuration directive ``config_dir``. It defaults to the data directory and must be writable by Patroni.
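To make these ``postgresql`` options concrete, here is a hedged sketch combining them; the path and the hba lines are placeholders, not recommendations:

.. code:: YAML

    postgresql:
      use_unix_socket: true
      config_dir: /etc/patroni/postgresql  # must be writable by Patroni
      pg_hba:                              # replaces pg_hba.conf completely
        - local all all trust
        - host replication replicator 127.0.0.1/32 md5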
**Bug fixes and stability improvements**

- Handle EtcdEventIndexCleared and EtcdWatcherCleared exceptions (Alexander)

  Faster recovery when the watch operation is ended by Etcd, by avoiding useless retries.

- Remove error spinning on Etcd failure and reduce log spam (Ants)

  Avoid immediate retrying and emitting stack traces in the log on the second and subsequent Etcd connection failures.

- Export locale variables when forking PostgreSQL processes (Oleksii Kliukin)

  Avoid the `postmaster became multithreaded during startup` fatal error on non-English locales for PostgreSQL built with NLS.

- Extra checks when dropping the replication slot (Alexander)

  In some cases Patroni was prevented from dropping the replication slot by the WAL sender.

- Truncate the replication slot name to 63 (NAMEDATALEN - 1) characters to comply with PostgreSQL naming rules (Nick Scott)

- Fix a race condition resulting in extra connections being opened to the PostgreSQL cluster from Patroni (Alexander)

- Release the leader key when the node restarts with an empty data directory (Alex Kerney)

- Set the asynchronous executor busy when running bootstrap without a leader (Alexander)

  Failure to do so could have resulted in errors stating the node belonged to a different cluster, as Patroni proceeded with its normal business while being bootstrapped by a bootstrap method that doesn't require a leader to be present in the cluster.

- Improve the WAL-E replica creation method (Joar Wandborg, Alexander).

  - Use csv.DictReader when parsing the WAL-E base backup, accepting ISO dates with space-delimited date and time.

  - Support fetching the current WAL position from the replica to estimate the amount of WAL to restore. Previously, the code used to call system information functions that were available only on the master node.

Version 1.2
-----------

This version introduces significant improvements over the handling of synchronous replication, makes the startup process and failover more reliable, adds PostgreSQL 9.6 support and fixes plenty of bugs. In addition, the documentation, including these release notes, has been moved to https://patroni.readthedocs.io.

**Synchronous replication**

- Add synchronous replication support. (Ants Aasma)

  Adds a new configuration variable ``synchronous_mode``. When enabled, Patroni will manage ``synchronous_standby_names`` to enable synchronous replication whenever there are healthy standbys available. When synchronous mode is enabled, Patroni will automatically fail over only to a standby that was synchronously replicating at the time of the master failure. This effectively means that no user-visible transaction gets lost in such a case. See the :ref:`feature documentation ` for the detailed description and implementation details.

**Reliability improvements**

- Do not try to update the leader position stored in the ``leader optime`` key when PostgreSQL is not 100% healthy. Demote immediately when the update of the leader key fails. (Alexander Kukushkin)

- Exclude unhealthy nodes from the list of targets to clone the new replica from. (Alexander)

- Implement a retry and timeout strategy for Consul similar to how it is done for Etcd. (Alexander)

- Make ``--dcs`` and ``--config-file`` apply to all options in ``patronictl``. (Alexander)

- Write all postgres parameters into postgresql.conf. (Alexander)

  It allows starting PostgreSQL configured by Patroni with just ``pg_ctl``.

- Avoid exceptions when there are no users in the config. (Kirill Pushkin)

- Allow pausing an unhealthy cluster.
  Before this fix, ``patronictl`` would bail out if the node it tries to execute pause on is unhealthy. (Alexander)

- Improve the leader watch functionality. (Alexander)

  Previously the replicas were always watching the leader key (sleeping until the timeout or until the leader key changes). With this change, they only watch when the replica's PostgreSQL is in the ``running`` state, and not when it is stopped/starting or restarting PostgreSQL.

- Avoid running into race conditions when handling SIGCHLD as PID 1. (Alexander)

  Previously a race condition could occur when running inside Docker containers, since the same process inside Patroni both spawned new processes and handled SIGCHLD from them. This change uses fork/execs for Patroni and leaves the original PID 1 process responsible for handling signals from children.

- Fix WAL-E restore. (Oleksii Kliukin)

  Previously the WAL-E restore used the ``no_master`` flag to avoid consulting the master altogether, making Patroni always choose restoring from WAL over ``pg_basebackup``. This change reverts it to the original meaning of ``no_master``, namely that the Patroni WAL-E restore may be selected as a replication method if the master is not running. The latter is checked by examining the connection string passed to the method. In addition, it makes the retry mechanism more robust and handles other minutiae.

- Implement an asynchronous DNS resolver cache. (Alexander)

  Avoid failing when DNS is temporarily unavailable (for instance, due to excessive traffic received by the node).

- Implement the starting state and master start timeout. (Ants, Alexander)

  Previously ``pg_ctl`` waited for a timeout and then happily trotted on, considering PostgreSQL to be running. This caused PostgreSQL to show up in listings as running when it was actually not, and caused a race condition that resulted in either a failover, or a crash recovery, or a crash recovery interrupted by failover and a missed rewind.

  This change adds a ``master_start_timeout`` parameter and introduces a new state for the main HA loop: ``starting``. When ``master_start_timeout`` is 0, we fail over immediately when the master crashes, as soon as there is a failover candidate. Otherwise, Patroni will wait after attempting to start PostgreSQL on the master for the duration of the timeout; when it expires, it will fail over if possible. Manual failover requests will be honored during the crash of the master even before the timeout expires.

  Introduce the ``timeout`` parameter to the ``restart`` API endpoint and ``patronictl``. When it is set and the restart takes longer than the timeout, PostgreSQL is considered unhealthy and the other nodes become eligible to take the leader lock.

- Fix ``pg_rewind`` behavior in pause mode. (Ants)

  Avoid an unnecessary restart in pause mode when Patroni thinks it needs to rewind but a rewind is not possible (i.e. ``pg_rewind`` is not present). Fall back to the default ``libpq`` values for the ``superuser`` (default OS user) if ``superuser`` authentication is missing from the ``pg_rewind``-related Patroni configuration section.

- Serialize callback execution. Kill the previous callback of the same type when the new one is about to run. Fix the issue of spawning zombie processes when running callbacks. (Alexander)

- Avoid promoting a former master when the leader key is set in DCS but the update to this leader key fails. (Alexander)
  This avoids the issue of the current master continuing to keep its role when it is partitioned together with a minority of nodes in Etcd and other DCSs that allow "inconsistent reads".

**Miscellaneous**

- Add the ``post_init`` configuration option on bootstrap. (Alejandro Martínez)

  Patroni will call the script argument of this option right after running ``initdb`` and starting up PostgreSQL for a new cluster. The script receives a connection URL with the ``superuser``, with ``PGPASSFILE`` set to point to the ``.pgpass`` file containing the password. If the script fails, Patroni initialization fails as well. It is useful for adding new users or creating extensions in the new cluster.

- Implement PostgreSQL 9.6 support. (Alexander)

  Use ``wal_level = replica`` as a synonym for ``hot_standby``, avoiding the pending_restart flag when it changes from one to another. (Alexander)

**Documentation improvements**

- Add a Patroni main `loop workflow diagram `__. (Alejandro, Alexander)

- Improve the README, adding the Helm chart and links to release notes. (Lauri Apple)

- Move the Patroni documentation to ``Read the Docs``. The up-to-date documentation is available at https://patroni.readthedocs.io. (Oleksii)

  Makes the documentation easily viewable from different devices (including smartphones) and searchable.

- Move the package to semantic versioning. (Oleksii)

  Patroni will follow the major.minor.patch version schema to avoid releasing a new minor version on small but critical bugfixes. We will only publish the release notes for the minor version, which will include all patches.

Version 1.1
-----------

This release improves management of the Patroni cluster by bringing in the pause mode, improves maintenance with scheduled and conditional restarts, makes Patroni interaction with Etcd or Zookeeper more resilient and greatly enhances patronictl.

**Upgrade notice**

When upgrading from releases below 1.0, read about the changes to credentials and configuration format in the 1.0 release notes.

**Pause mode**

- Introduce the pause mode to temporarily detach Patroni from managing the PostgreSQL instance (Murat Kabilov, Alexander Kukushkin, Oleksii Kliukin).

  Previously, one had to send a SIGKILL signal to Patroni to stop it without terminating PostgreSQL. The new pause mode detaches Patroni from PostgreSQL cluster-wide without terminating Patroni. It is similar to the maintenance mode in Pacemaker. Patroni is still responsible for updating member and leader keys in DCS, but it will not start, stop or restart the PostgreSQL server in the process. There are a few exceptions; for instance, manual failovers, reinitializes and restarts are still allowed. You can read :ref:`a detailed description of this feature `.

  In addition, patronictl supports the new ``pause`` and ``resume`` commands to toggle the pause mode.

**Scheduled and conditional restarts**

- Add conditions to the restart API command (Oleksii)

  This change enhances Patroni restarts by adding a couple of conditions that can be verified in order to do the restart. Among the conditions are restarting when the PostgreSQL role is either a master or a replica, checking the PostgreSQL version number, or restarting only when a restart is necessary in order to apply configuration changes.

- Add scheduled restarts (Oleksii)

  It is now possible to schedule a restart in the future. Only one scheduled restart per node is supported. It is possible to clear the scheduled restart if it is not needed anymore.
  A combination of scheduled and conditional restarts is supported, making it possible, for instance, to schedule minor PostgreSQL upgrades at night, restarting only the instances that are running the outdated minor version, without adding postgres-specific logic to administration scripts.

- Add support for conditional and scheduled restarts to patronictl (Murat).

  patronictl restart supports several new options. There is also a patronictl flush command to clear the scheduled actions.

**Robust DCS interaction**

- Set Kazoo timeouts depending on the loop_wait (Alexander)

  Originally, the ping_timeout and connect_timeout values were calculated from the negotiated session timeout. Patroni's loop_wait was not taken into account. As a result, a single retry could take more time than the session timeout, forcing Patroni to release the lock and demote. This change sets the ping and connect timeouts to half the value of loop_wait, speeding up detection of connection issues and leaving enough time to retry the connection attempt before losing the lock.

- Update the Etcd topology only after the original request succeeds (Alexander)

  Postpone updating the Etcd topology known to the client until after the original request. When retrieving the cluster topology, implement the retry timeouts depending on the known number of nodes in the Etcd cluster. This makes our client prefer getting the results of the request to having the up-to-date list of nodes.

  Both changes make Patroni connections to the DCS more robust in the face of network issues.

**Patronictl, monitoring and configuration**

- Return information about streaming replicas via the API (Feike Steenbergen)

  Previously, there was no reliable way to query Patroni about PostgreSQL instances that fail to stream changes (for instance, due to connection issues). This change exposes the contents of pg_stat_replication via the /patroni endpoint.

- Add the patronictl scaffold command (Oleksii)

  Add a command to create the cluster structure in Etcd. The cluster is created with a user-specified sysid and leader, and both leader and member keys are made persistent. This command is useful for creating so-called master-less configurations, where a Patroni cluster consisting of only replicas replicates from an external master node that is unaware of Patroni. Subsequently, one may remove the leader key, promoting one of the Patroni nodes and replacing the original master with the Patroni-based HA cluster.

- Add the configuration option ``bin_dir`` to locate PostgreSQL binaries (Ants Aasma)

  It is useful to be able to specify the location of PostgreSQL binaries explicitly on Linux distros that support installing multiple PostgreSQL versions at the same time.

- Allow the configuration file path to be overridden using ``custom_conf`` (Alejandro Martínez)

  Allows for custom configuration file paths, which will be unmanaged by Patroni, :ref:`details `.

**Bug fixes and code improvements**

- Make Patroni compatible with the new version schema in PostgreSQL 10 and above (Feike)

  Make sure that Patroni understands 2-digit version numbers when doing conditional restarts based on the PostgreSQL version.

- Use pkgutil to find DCS modules (Alexander)

  Use the dedicated python module instead of traversing directories manually in order to find DCS modules.

- Always call the on_start callback when starting Patroni (Alexander)

  Previously, Patroni did not call any callbacks when attaching to an already running node with the correct role.
  Since callbacks are often used to route client connections, that could result in a failure to register the running node in the connection routing scheme. With this fix, Patroni calls the on_start callback even when attaching to an already running node.

- Do not drop active replication slots (Murat, Oleksii)

  Avoid dropping active physical replication slots on the master. PostgreSQL cannot drop such slots anyway. This change makes it possible to run non-Patroni-managed replicas/consumers on the master.

- Close Patroni connections during the start of the PostgreSQL instance (Alexander)

  Forces Patroni to close all former connections when the PostgreSQL node is started. Avoids the trap of reusing former connections if the postmaster was killed with SIGKILL.

- Replace invalid characters when constructing slot names from member names (Ants)

  Make sure that standby names that do not comply with the slot naming rules don't cause the slot creation and standby startup to fail. Replace the dashes in the slot names with underscores, and all other characters not allowed in slot names with their unicode codepoints.

Version 1.0
-----------

This release introduces the global dynamic configuration that allows dynamic changes of the PostgreSQL and Patroni configuration parameters for the entire HA cluster. It also delivers numerous bugfixes.

**Upgrade notice**

When upgrading from v0.90 or below, always upgrade all replicas before the master. Since we don't store replication credentials in DCS anymore, an old replica won't be able to connect to the new master.

**Dynamic Configuration**

- Implement the dynamic global configuration (Alexander Kukushkin)

  Introduce the new REST API endpoint /config to provide PostgreSQL and Patroni configuration parameters that should be set globally for the entire HA cluster (master and all the replicas). Those parameters are set in DCS and in many cases can be applied without disrupting PostgreSQL or Patroni. Patroni sets a special flag called "pending restart", visible via the API, when some of the values require a PostgreSQL restart. In that case, the restart should be issued manually via the API.

  A SIGHUP to Patroni or a POST to /reload will make it re-read the configuration file.

  See the :ref:`dynamic configuration ` for the details on which parameters can be changed and the order of processing the different configuration sources.

  The configuration file format *has changed* since v0.90. Patroni is still compatible with the old configuration files, but in order to take advantage of the bootstrap parameters one needs to change it. Users are encouraged to update them by referring to the :ref:`dynamic configuration documentation page `.

**More flexible configuration**

- Make the postgresql configuration and the name of the database Patroni connects to configurable (Misja Hoebe)

  Introduce the `database` and `config_base_name` configuration parameters. Among other things, this makes it possible to run Patroni with PipelineDB and other PostgreSQL forks.

- Implement the possibility to configure some Patroni configuration parameters via the environment (Alexander)

  Those include the scope, the node name and the namespace, as well as the secrets. This makes it easier to run Patroni in a dynamic environment, e.g. Kubernetes. Please refer to the :ref:`supported environment variables ` for further details.

- Update the built-in Patroni docker container to take advantage of environment-based configuration (Feike Steenbergen).
- Add Zookeeper support to the Patroni docker image (Alexander)

- Split the Zookeeper and Exhibitor configuration options (Alexander)

- Make patronictl reuse the code from Patroni to read configuration (Alexander)

  This allows patronictl to take advantage of environment-based configuration.

- Set the application name to the node name in primary_conninfo (Alexander)

  This simplifies identification and configuration of synchronous replication for a given node.

**Stability, security and usability improvements**

- Reset sysid and do not call pg_controldata when a restore of a backup is in progress (Alexander)

  This change reduces the amount of noise generated by Patroni API health checks during the lengthy initialization of the node from a backup.

- Fix a bunch of pg_rewind corner cases (Alexander)

  Avoid running pg_rewind if the source cluster is not the master. In addition, avoid removing the data directory on an unsuccessful rewind, unless the new parameter *remove_data_directory_on_rewind_failure* is set to true. By default it is false.

- Remove passwords from the replication connection string in DCS (Alexander)

  Previously, Patroni always used the replication credentials from the Postgres URL in DCS. That is now changed to take the credentials from the Patroni configuration. The secrets (replication username and password) are no longer exposed in DCS.

- Fix the asynchronous machinery around the demote call (Alexander)

  Demote now runs completely asynchronously, without blocking the DCS interactions.

- Make patronictl always send the authorization header if it is configured (Alexander)

  This allows patronictl to issue "protected" requests, i.e. restart or reinitialize, when Patroni is configured to require authorization on those.

- Handle the SystemExit exception correctly (Alexander)

  Avoids the issue of Patroni not stopping properly when receiving SIGTERM.

- Sample haproxy templates for confd (Alexander)

  Generates and dynamically changes the haproxy configuration from the Patroni state in the DCS using confd.

- Improve and restructure the documentation to make it more friendly to new users (Lauri Apple)

- The API must report role=master during pg_ctl stop (Alexander)

  Makes the callback calls more reliable, particularly in the cluster stop case. In addition, introduce the `pg_ctl_timeout` option to set the timeout for the start, stop and restart calls via `pg_ctl`.

- Fix the retry logic in etcd (Alexander)

  Make retries more predictable and robust.

- Make the Zookeeper code more resilient against short network hiccups (Alexander)

  Reduce the connection timeouts to make Zookeeper connection attempts more frequent.

Version 0.90
------------

This release adds support for Consul, includes a new *noloadbalance* tag, changes the behavior of the *clonefrom* tag, improves *pg_rewind* handling and improves the *patronictl* control program.

**Consul support**

- Implement Consul support (Alexander Kukushkin)

  Patroni runs against Consul, in addition to Etcd and Zookeeper. The connection parameters can be configured in the YAML file.

**New and improved tags**

- Implement the *noloadbalance* tag (Alexander)

  This tag makes Patroni always report that the replica is not available to the load balancer.

- Change the implementation of the *clonefrom* tag (Alexander)

  Previously, a node name had to be supplied to *clonefrom*, forcing a tagged replica to clone from that specific node. The new implementation makes *clonefrom* a boolean tag: if it is set to true, the replica becomes a candidate for other replicas to clone from it.
  When multiple candidates are present, a replica picks one of them randomly.

**Stability and security improvements**

- Numerous reliability improvements (Alexander)

  Removes some spurious error messages, improves the stability of the failover, and addresses some corner cases with reading data from DCS, shutdown, demote and reattaching of the former leader.

- Improve the systemd script to avoid killing Patroni children on stop (Jan Keirse, Alexander Kukushkin)

  Previously, when stopping Patroni, *systemd* also sent a signal to PostgreSQL. Since Patroni also tried to stop PostgreSQL by itself, this resulted in two different shutdown requests being sent (the smart shutdown, followed by the fast shutdown). That resulted in replicas disconnecting too early and a former master not being able to rejoin after demote. Fix by Jan with prior research by Alexander.

- Eliminate some cases where the former master was unable to call pg_rewind before rejoining as a replica (Oleksii Kliukin)

  Previously, we only called *pg_rewind* if the former master had crashed. Change this to always run pg_rewind for the former master as long as pg_rewind is present in the system. This fixes the case when the master is shut down before the replicas managed to get the latest changes (i.e. during the "smart" shutdown).

- Numerous improvements to unit and acceptance tests; in particular, enable support for Zookeeper and Consul (Alexander).

- Make Travis CI faster and implement support for running tests against Zookeeper (Exhibitor) and Consul (Alexander)

  Both unit and acceptance tests run automatically against Etcd, Zookeeper and Consul on each commit or pull request.

- Clear environment variables before calling PostgreSQL commands from Patroni (Feike Steenbergen)

  This prevents the possibility of reading system environment variables by connecting to the PostgreSQL cluster managed by Patroni.

**Configuration and control changes**

- Unify the patronictl and Patroni configuration (Feike)

  patronictl can use the same configuration file as Patroni itself.

- Enable Patroni to read the configuration from environment variables (Oleksii)

  This simplifies generating configuration for Patroni automatically, or merging a single configuration from different sources.

- Include the database system identifier in the information returned by the API (Feike)

- Implement *delete_cluster* for all available DCSs (Alexander)

  Enables support for DCSs other than Etcd in patronictl.

Version 0.80
------------

This release adds support for *cascading replication* and simplifies Patroni management by providing *scheduled failovers*. One may use older versions of Patroni (in particular, 0.78) combined with this one in order to migrate to the new release. Note that the scheduled failover and cascading replication related features will only work with Patroni 0.80 and above.

**Cascading replication**

- Add support for the *replicatefrom* and *clonefrom* tags for the patroni node (Oleksii Kliukin).

  The tag *replicatefrom* allows a replica to use an arbitrary node as a source, not necessarily the master. The *clonefrom* does the same for the initial backup. Together, they enable Patroni to fully support cascading replication.

- Add support for running replication methods to initialize the replica even without a running replication connection (Oleksii).

  This is useful in order to create replicas from snapshots stored on S3 or FTP. A replication method that does not require a running replication connection should supply *no_master: true* in the yaml configuration.
Those scripts will still be called in order if the replication connection is present.

**Patronictl, API and DCS improvements**

- Implement scheduled failovers (Feike Steenbergen).

  Failovers can be scheduled to happen at a certain time in the future, using either patronictl or API calls.

- Add support for *dbuser* and *password* parameters in patronictl (Feike).

- Add PostgreSQL version to the health check output (Feike).

- Improve Zookeeper support in patronictl (Oleksandr Shulgin)

- Migrate to python-etcd 0.43 (Alexander Kukushkin)

**Configuration**

- Add a sample systemd configuration script for Patroni (Jan Keirse).

- Fix the problem of Patroni ignoring the superuser name specified in the configuration file for DB connections (Alexander).

- Fix the handling of CTRL-C by creating a separate session ID and process group for the postmaster launched by Patroni (Alexander).

**Tests**

- Add acceptance tests with *behave* in order to check real-world scenarios of running Patroni (Alexander, Oleksii).

  The tests can be launched manually using the *behave* command. They are also launched automatically for pull requests and after commits.

Release notes for some older versions can be found on the `project's github page `__.

patroni-1.6.4/docs/replica_bootstrap.rst000066400000000000000000000224541361356115100204050ustar00rootroot00000000000000
Replica imaging and bootstrap
=============================

Patroni allows customizing the creation of a new replica. It also supports defining what happens when a new empty cluster is being bootstrapped. The distinction between the two is well defined: Patroni creates replicas only if the ``initialize`` key is present in DCS for the cluster. If there is no ``initialize`` key, Patroni calls bootstrap exclusively on the first node that takes the initialize key lock.

.. _custom_bootstrap:

Bootstrap
---------

PostgreSQL provides the ``initdb`` command to initialize a new cluster and Patroni calls it by default. In certain cases, particularly when creating a new cluster as a copy of an existing one, it is necessary to replace the built-in method with custom actions. Patroni supports executing user-defined scripts to bootstrap new clusters, supplying some required arguments to them, i.e. the name of the cluster and the path to the data directory. This is configured in the ``bootstrap`` section of the Patroni configuration. For example:

.. code:: YAML

    bootstrap:
      method: <custom_bootstrap_method_name>
      <custom_bootstrap_method_name>:
        command: <path_to_custom_bootstrap_script> [param1 [, ...]]
        keep_existing_recovery_conf: False
        recovery_conf:
          recovery_target_action: promote
          recovery_target_timeline: latest
          restore_command: <method_specific_restore_command>

Each bootstrap method must define at least a ``name`` and a ``command``. A special ``initdb`` method is available to trigger the default behavior, in which case the ``method`` parameter can be omitted altogether. The ``command`` can be specified using either an absolute path, or one relative to the ``patroni`` command location.

In addition to the fixed parameters defined in the configuration files, Patroni supplies two cluster-specific ones:

--scope    Name of the cluster to be bootstrapped
--datadir  Path to the data directory of the cluster instance to be bootstrapped

If the bootstrap script returns 0, Patroni tries to configure and start the PostgreSQL instance produced by it. If any of the intermediate steps fail, or the script returns a non-zero value, Patroni assumes that the bootstrap has failed, cleans up after itself and releases the initialize lock to give another node the opportunity to bootstrap.
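To make the contract above concrete, here is a minimal sketch of a custom bootstrap script written in Python. It is illustrative only: ``restore_tool`` is a hypothetical placeholder for whatever backup tooling you actually use, while the ``--scope``/``--datadir`` arguments and the exit-code semantics come from the description above.

.. code:: python

    #!/usr/bin/env python
    # Hypothetical custom bootstrap script: Patroni appends --scope and
    # --datadir to the parameters configured in the bootstrap section.
    import argparse
    import subprocess
    import sys

    if __name__ == "__main__":
        parser = argparse.ArgumentParser()
        parser.add_argument("--scope", required=True)    # cluster name, supplied by Patroni
        parser.add_argument("--datadir", required=True)  # target data directory, supplied by Patroni
        args, _ = parser.parse_known_args()

        # "restore_tool" is a placeholder for a real backup/restore command.
        ret = subprocess.call(["restore_tool", "--cluster", args.scope,
                               "--target", args.datadir])

        # Exit 0 so that Patroni configures and starts the restored instance;
        # any non-zero value makes Patroni clean up and release the initialize lock.
        sys.exit(ret)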
If a ``recovery_conf`` block is defined in the same section as the custom bootstrap method, Patroni will generate a ``recovery.conf`` before starting the newly bootstrapped instance. Typically, such a recovery.conf should contain at least one of the ``recovery_target_*`` parameters, together with ``recovery_target_action`` set to ``promote``.

If ``keep_existing_recovery_conf`` is defined and set to ``True``, Patroni will not remove the existing ``recovery.conf`` file if it exists. This is useful when bootstrapping from a backup with tools like pgBackRest that generate the appropriate ``recovery.conf`` for you.

.. note:: Bootstrap methods are neither chained, nor fallen back to the default one in case the primary one fails.

.. _custom_replica_creation:

Building replicas
-----------------

Patroni uses the tried and proven ``pg_basebackup`` in order to create new replicas. One downside of it is that it requires a running master node. Another one is the lack of 'on-the-fly' compression for the backup data and no built-in cleanup for outdated backup files. Some people prefer other backup solutions, such as ``WAL-E``, ``pgBackRest``, ``Barman`` and others, or simply roll their own scripts. In order to accommodate all those use-cases Patroni supports running custom scripts to clone a new replica. Those are configured in the ``postgresql`` configuration block:

.. code:: YAML

    postgresql:
        create_replica_methods:
            - <method name>
        <method name>:
            command: <command name>
            keep_data: True
            no_params: True
            no_master: 1

example: wal_e

.. code:: YAML

    postgresql:
        create_replica_methods:
            - wal_e
            - basebackup
        wal_e:
            command: patroni_wale_restore
            no_master: 1
            envdir: {{WALE_ENV_DIR}}
            use_iam: 1
        basebackup:
            max-rate: '100M'

example: pgbackrest

.. code:: YAML

    postgresql:
        create_replica_methods:
            - pgbackrest
            - basebackup
        pgbackrest:
            command: /usr/bin/pgbackrest --stanza=<scope> --delta restore
            keep_data: True
            no_params: True
        basebackup:
            max-rate: '100M'

The ``create_replica_methods`` defines the available replica creation methods and the order of executing them. Patroni will stop on the first one that returns 0. Each method should define a separate section in the configuration file, listing the command to execute and any custom parameters that should be passed to that command. All parameters will be passed in a ``--name=value`` format. Besides user-defined parameters, Patroni supplies a couple of cluster-specific ones:

--scope       Which cluster this replica belongs to
--datadir     Path to the data directory of the replica
--role        Always 'replica'
--connstring  Connection string to connect to the cluster member to clone from (master or other replica). The user in the connection string can execute SQL and replication protocol commands.

A special ``no_master`` parameter, if defined, allows Patroni to call the replica creation method even if there is no running master or replicas. In that case, an empty string will be passed in the connection string. This is useful for restoring the formerly running cluster from the binary backup.

A special ``keep_data`` parameter, if defined, will instruct Patroni not to clean the PGDATA folder before calling restore.

A special ``no_params`` parameter, if defined, restricts passing parameters to the custom command.
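As an illustration of the parameter-passing convention just described, here is a minimal sketch of a custom replica creation script in Python. The ``--scope``, ``--datadir``, ``--role`` and ``--connstring`` parameters and the ``--name=value`` format come from the description above; ``fetch_backup`` is a hypothetical placeholder for a real restore tool.

.. code:: python

    #!/usr/bin/env python
    # Hypothetical create_replica_method script. Patroni passes all
    # parameters as --name=value; parse_known_args tolerates any extra
    # user-defined parameters configured for the method.
    import argparse
    import subprocess
    import sys

    if __name__ == "__main__":
        parser = argparse.ArgumentParser()
        parser.add_argument("--scope", required=True)    # cluster name
        parser.add_argument("--datadir", required=True)  # replica data directory
        parser.add_argument("--role")                    # always 'replica'
        parser.add_argument("--connstring", default="")  # empty when called via no_master
        args, _ = parser.parse_known_args()

        if not args.connstring:
            # Started with no_master: restore from a backup location
            # ("fetch_backup" is a placeholder) instead of a live member.
            sys.exit(subprocess.call(["fetch_backup", "--cluster", args.scope,
                                      "--target", args.datadir]))

        # Otherwise clone from the member Patroni pointed us at.
        sys.exit(subprocess.call(["pg_basebackup", "-D", args.datadir,
                                  "-X", "stream", "-d", args.connstring]))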
A ``basebackup`` method is a special case: it will be used if ``create_replica_methods`` is empty, although it is possible to list it explicitly among the ``create_replica_methods`` methods. This method initializes a new replica with ``pg_basebackup``; the base backup is taken from the master unless there are replicas with the ``clonefrom`` tag, in which case one of those replicas will be used as the origin for pg_basebackup. It works without any configuration; however, it is possible to specify a ``basebackup`` configuration section. The same rules as with the other method configurations apply, namely, only long options (with ``--``) should be specified there. Not all parameters make sense: if you override the connection string or provide an option to create tar-ed or compressed base backups, Patroni won't be able to make a replica out of it. There is no validation performed on the names or values of the parameters passed to the ``basebackup`` section. You can specify basebackup parameters as either a map (key-value pairs) or a list of elements, where each element could be either a key-value pair or a single key (for options that do not receive any values, for instance, ``--verbose``). Consider these two examples:

.. code:: YAML

    postgresql:
        basebackup:
            max-rate: '100M'
            checkpoint: 'fast'

and

.. code:: YAML

    postgresql:
        basebackup:
            - verbose
            - max-rate: '100M'

If all replica creation methods fail, Patroni will try all of them again, in order, during the next event loop cycle.

.. _standby_cluster:

Standby cluster
---------------

Another available option is to run a "standby cluster", which consists only of standby nodes replicating from some remote master. This type of cluster has:

* a "standby leader", which behaves pretty much like a regular cluster leader, except it replicates from a remote master.
* cascade replicas, which replicate from the standby leader.

The standby leader holds and updates a leader lock in DCS. If the leader lock expires, cascade replicas will perform an election to choose another leader from the standbys.

For the sake of flexibility, you can specify the methods of creating a replica and recovering WAL records when a cluster is in "standby mode" by providing the `create_replica_methods` key in the `standby_cluster` section. It is distinct from creating replicas when the cluster is detached and functions as a normal cluster, which is controlled by `create_replica_methods` in the `postgresql` section. Both "standby" and "normal" `create_replica_methods` reference keys in the `postgresql` section.

To configure such a cluster you need to specify the ``standby_cluster`` section in a patroni configuration:

.. code:: YAML

    bootstrap:
        dcs:
            standby_cluster:
                host: 1.2.3.4
                port: 5432
                primary_slot_name: patroni
                create_replica_methods:
                - basebackup

Note that these options will be applied only once during cluster bootstrap, and the only way to change them afterwards is through DCS.

If you use replication slots on the standby cluster, you must also create the corresponding replication slot on the primary cluster. It will not be done automatically by the standby cluster implementation. You can use Patroni's permanent replication slots feature on the primary cluster to maintain a replication slot with the same name as ``primary_slot_name``, or its default value if ``primary_slot_name`` is not provided.

patroni-1.6.4/docs/replication_modes.rst000066400000000000000000000165501361356115100203701ustar00rootroot00000000000000
.. _replication_modes:

=================
Replication modes
=================

Patroni uses PostgreSQL streaming replication. For more information about streaming replication, see the `Postgres documentation `__. By default Patroni configures PostgreSQL for asynchronous replication.
Choosing your replication schema is dependent on your business considerations. Investigate both async and sync replication, as well as other HA solutions, to determine which solution is best for you.

Asynchronous mode durability
----------------------------

In asynchronous mode the cluster is allowed to lose some committed transactions to ensure availability. When the primary server fails or becomes unavailable for any other reason, Patroni will automatically promote a sufficiently healthy standby to primary. Any transactions that have not been replicated to that standby remain in a "forked timeline" on the primary, and are effectively unrecoverable [1]_.

The number of transactions that can be lost is controlled via the ``maximum_lag_on_failover`` parameter. Because the primary transaction log position is not sampled in real time, the actual amount of data lost on failover is, in the worst case, bounded by ``maximum_lag_on_failover`` bytes of transaction log plus the amount that is written in the last ``ttl`` seconds (``loop_wait``/2 seconds in the average case). However, the typical steady-state replication delay is well under a second.

By default, when running leader elections, Patroni does not take into account the current timeline of replicas, which in some cases could be undesirable behavior. You can prevent a node that does not have the same timeline as the former master from becoming the new leader by changing the value of the ``check_timeline`` parameter to ``true``.

PostgreSQL synchronous replication
----------------------------------

You can use Postgres's `synchronous replication `__ with Patroni. Synchronous replication ensures consistency across a cluster by confirming that writes are written to a secondary before returning to the connecting client with a success. The cost of synchronous replication is reduced throughput on writes. This throughput will be entirely based on network performance. In hosted datacenter environments (like AWS, Rackspace, or any network you do not control), synchronous replication significantly increases the variability of write performance. If followers become inaccessible from the leader, the leader effectively becomes read-only.

To enable a simple synchronous replication test, add the following lines to the ``parameters`` section of your YAML configuration files:

.. code:: YAML

    synchronous_commit: "on"
    synchronous_standby_names: "*"

When using PostgreSQL synchronous replication, use at least three Postgres data nodes to ensure write availability if one host fails.

Using PostgreSQL synchronous replication does not guarantee zero lost transactions under all circumstances. When the primary and the secondary that is currently acting as a synchronous replica fail simultaneously, a third node that might not contain all transactions will be promoted.

.. _synchronous_mode:

Synchronous mode
----------------

For use cases where losing committed transactions is not permissible you can turn on Patroni's ``synchronous_mode``. When ``synchronous_mode`` is turned on Patroni will not promote a standby unless it is certain that the standby contains all transactions that may have returned a successful commit status to the client [2]_. This means that the system may be unavailable for writes even though some servers are available. System administrators can still use manual failover commands to promote a standby even if it results in transaction loss.

Turning on ``synchronous_mode`` does not guarantee multi-node durability of commits under all circumstances.
When no suitable standby is available, the primary server will still accept writes, but does not guarantee their replication. When the primary fails in this mode no standby will be promoted. When the host that used to be the primary comes back it will get promoted automatically, unless a system administrator performed a manual failover. This behavior makes synchronous mode usable with 2-node clusters.

When ``synchronous_mode`` is on and a standby crashes, commits will block until the next iteration of Patroni runs and switches the primary to standalone mode (worst case delay for writes ``ttl`` seconds, average case ``loop_wait``/2 seconds). Manually shutting down or restarting a standby will not cause a commit service interruption. The standby will signal the primary to release it from synchronous standby duties before PostgreSQL shutdown is initiated.

When it is absolutely necessary to guarantee that each write is stored durably on at least two nodes, enable ``synchronous_mode_strict`` in addition to ``synchronous_mode``. This parameter prevents Patroni from switching off the synchronous replication on the primary when no synchronous standby candidates are available. As a downside, the primary will not be available for writes (unless the Postgres transaction explicitly turns off ``synchronous_commit``), blocking all client write requests until at least one synchronous replica comes up.

You can ensure that a standby never becomes the synchronous standby by setting the ``nosync`` tag to true. Setting this is recommended for standbys that are behind slow network connections and would cause performance degradation when becoming a synchronous standby.

Synchronous mode can be switched on and off via the Patroni REST interface. See :ref:`dynamic configuration ` for instructions.

Synchronous mode implementation
-------------------------------

When in synchronous mode Patroni maintains synchronization state in the DCS, containing the latest primary and the current synchronous standby. This state is updated with strict ordering constraints to ensure the following invariants:

- A node must be marked as the latest leader whenever it can accept write transactions. Patroni crashing or PostgreSQL not shutting down can cause violations of this invariant.

- A node must be set as the synchronous standby in PostgreSQL as long as it is published as the synchronous standby.

- A node that is not the leader or current synchronous standby is not allowed to promote itself automatically.

Patroni will only ever assign one standby to ``synchronous_standby_names`` because with multiple candidates it is not possible to know which node was acting as synchronous during the failure.

On each HA loop iteration Patroni re-evaluates the synchronous standby choice. If the current synchronous standby is connected and has not requested its synchronous status to be removed, it remains picked. Otherwise the cluster member available for sync that is furthest ahead in replication is picked.

.. [1] The data is still there, but recovering it requires a manual recovery effort by data recovery specialists. When Patroni is allowed to rewind with ``use_pg_rewind`` the forked timeline will be automatically erased to rejoin the failed primary with the cluster.

.. [2] Clients can change the behavior per transaction using PostgreSQL's ``synchronous_commit`` setting. Transactions with ``synchronous_commit`` values of ``off`` and ``local`` may be lost on failover, but will not be blocked by replication delays.
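As a small illustration of footnote [2], the following sketch relaxes ``synchronous_commit`` for a single transaction whose loss on failover would be acceptable. The connection parameters and the table name are illustrative placeholders, not part of Patroni.

.. code:: python

    # Per-transaction override of synchronous_commit (see footnote [2]).
    # Host, port, credentials and the table are placeholders.
    import psycopg2

    conn = psycopg2.connect(host="127.0.0.1", port=5432,
                            user="postgres", dbname="postgres")
    try:
        with conn:  # commits the transaction on success
            with conn.cursor() as cur:
                # Affects only the current transaction; the cluster-wide
                # synchronous replication settings stay untouched.
                cur.execute("SET LOCAL synchronous_commit TO 'local'")
                cur.execute("INSERT INTO low_value_events VALUES (now())")
    finally:
        conn.close()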
patroni-1.6.4/docs/rest_api.rst000066400000000000000000000274521361356115100165020ustar00rootroot00000000000000.. _rest_api: Patroni REST API ================ Patroni has a rich REST API, which is used by Patroni itself during the leader race, by the ``patronictl`` tool in order to perform failovers/switchovers/reinitialize/restarts/reloads, by HAProxy or any other kind of load balancer to perform HTTP health checks, and of course could also be used for monitoring. Below you will find the list of Patroni REST API endpoints. Health check endpoints ---------------------- For all health check ``GET`` requests Patroni returns a JSON document with the status of the node, along with the HTTP status code. If you don't want or don't need the JSON document, you might consider using the ``OPTIONS`` method instead of ``GET``. - The following requests to Patroni REST API will return HTTP status code **200** only when the Patroni node is running as the leader: - ``GET /`` - ``GET /master`` - ``GET /leader`` - ``GET /primary`` - ``GET /read-write`` - ``GET /replica``: replica health check endpoint. It returns HTTP status code **200** only when the Patroni node is in the state ``running``, the role is ``replica`` and ``noloadbalance`` tag is not set. - ``GET /read-only``: like the above endpoint, but also includes the primary. - ``GET /standby-leader``: returns HTTP status code **200** only when the Patroni node is running as the leader in a :ref:`standby cluster `. - ``GET /synchronous`` or ``GET /sync``: returns HTTP status code **200** only when the Patroni node is running as a synchronous standby. - ``GET /asynchronous`` or ``GET /async``: returns HTTP status code **200** only when the Patroni node is running as an asynchronous standby. - ``GET /health``: returns HTTP status code **200** only when PostgreSQL is up and running. Monitoring endpoint ------------------- The ``GET /patroni`` is used by Patroni during the leader race. It also could be used by your monitoring system. The JSON document produced by this endpoint has the same structure as the JSON produced by the health check endpoints. .. code-block:: bash $ curl -s http://localhost:8008/patroni | jq . { "state": "running", "postmaster_start_time": "2019-09-24 09:22:32.555 CEST", "role": "master", "server_version": 110005, "cluster_unlocked": false, "xlog": { "location": 25624640 }, "timeline": 3, "database_system_identifier": "6739877027151648096", "patroni": { "version": "1.6.0", "scope": "batman" } } Cluster status endpoints ------------------------ - The ``GET /cluster`` endpoint generates a JSON document describing the current cluster topology and state: .. code-block:: bash $ curl -s http://localhost:8008/cluster | jq . { "members": [ { "name": "postgresql0", "host": "127.0.0.1", "port": 5432, "role": "leader", "state": "running", "api_url": "http://127.0.0.1:8008/patroni", "timeline": 5, "tags": { "clonefrom": true } }, { "name": "postgresql1", "host": "127.0.0.1", "port": 5433, "role": "replica", "state": "running", "api_url": "http://127.0.0.1:8009/patroni", "timeline": 5, "tags": { "clonefrom": true }, "lag": 0 } ], "scheduled_switchover": { "at": "2019-09-24T10:36:00+02:00", "from": "postgresql0" } } - The ``GET /history`` endpoint provides a view on the history of cluster switchovers/failovers. The format is very similar to the content of history files in the ``pg_wal`` directory. The only difference is the timestamp field showing when the new timeline was created. .. 
code-block:: bash $ curl -s http://localhost:8008/history | jq . [ [ 1, 25623960, "no recovery target specified", "2019-09-23T16:57:57+02:00" ], [ 2, 25624344, "no recovery target specified", "2019-09-24T09:22:33+02:00" ], [ 3, 25624752, "no recovery target specified", "2019-09-24T09:26:15+02:00" ], [ 4, 50331856, "no recovery target specified", "2019-09-24T09:35:52+02:00" ] ] Config endpoint --------------- ``GET /config``: Get the current version of the dynamic configuration: .. code-block:: bash $ curl -s localhost:8008/config | jq . { "ttl": 30, "loop_wait": 10, "retry_timeout": 10, "maximum_lag_on_failover": 1048576, "postgresql": { "use_slots": true, "use_pg_rewind": true, "parameters": { "hot_standby": "on", "wal_log_hints": "on", "wal_keep_segments": 8, "wal_level": "hot_standby", "max_wal_senders": 5, "max_replication_slots": 5, "max_connections": "100" } } } ``PATCH /config``: Change the existing configuration. .. code-block:: bash $ curl -s -XPATCH -d \ '{"loop_wait":5,"ttl":20,"postgresql":{"parameters":{"max_connections":"101"}}}' \ http://localhost:8008/config | jq . { "ttl": 20, "loop_wait": 5, "maximum_lag_on_failover": 1048576, "retry_timeout": 10, "postgresql": { "use_slots": true, "use_pg_rewind": true, "parameters": { "hot_standby": "on", "wal_log_hints": "on", "wal_keep_segments": 8, "wal_level": "hot_standby", "max_wal_senders": 5, "max_replication_slots": 5, "max_connections": "101" } } } The above REST API call patches the existing configuration and returns the new configuration. Let's check that the node processed this configuration. First of all it should start printing log lines every 5 seconds (loop_wait=5). The change of "max_connections" requires a restart, so the "pending_restart" flag should be exposed: .. code-block:: bash $ curl -s http://localhost:8008/patroni | jq . { "pending_restart": true, "database_system_identifier": "6287881213849985952", "postmaster_start_time": "2016-06-13 13:13:05.211 CEST", "xlog": { "location": 2197818976 }, "patroni": { "scope": "batman", "version": "1.0" }, "state": "running", "role": "master", "server_version": 90503 } Removing parameters: If you want to remove (reset) some setting just patch it with ``null``: .. code-block:: bash $ curl -s -XPATCH -d \ '{"postgresql":{"parameters":{"max_connections":null}}}' \ http://localhost:8008/config | jq . { "ttl": 20, "loop_wait": 5, "retry_timeout": 10, "maximum_lag_on_failover": 1048576, "postgresql": { "use_slots": true, "use_pg_rewind": true, "parameters": { "hot_standby": "on", "unix_socket_directories": ".", "wal_keep_segments": 8, "wal_level": "hot_standby", "wal_log_hints": "on", "max_wal_senders": 5, "max_replication_slots": 5 } } } The above call removes ``postgresql.parameters.max_connections`` from the dynamic configuration. ``PUT /config``: It's also possible to perform the full rewrite of an existing dynamic configuration unconditionally: .. code-block:: bash $ curl -s -XPUT -d \ '{"maximum_lag_on_failover":1048576,"retry_timeout":10,"postgresql":{"use_slots":true,"use_pg_rewind":true,"parameters":{"hot_standby":"on","wal_log_hints":"on","wal_keep_segments":8,"wal_level":"hot_standby","unix_socket_directories":".","max_wal_senders":5}},"loop_wait":3,"ttl":20}' \ http://localhost:8008/config | jq . 
    {
      "ttl": 20,
      "maximum_lag_on_failover": 1048576,
      "retry_timeout": 10,
      "postgresql": {
        "use_slots": true,
        "parameters": {
          "hot_standby": "on",
          "unix_socket_directories": ".",
          "wal_keep_segments": 8,
          "wal_level": "hot_standby",
          "wal_log_hints": "on",
          "max_wal_senders": 5
        },
        "use_pg_rewind": true
      },
      "loop_wait": 3
    }

Switchover and failover endpoints
---------------------------------

``POST /switchover`` or ``POST /failover``. These endpoints are very similar to each other. There are a couple of minor differences though:

1. The failover endpoint allows you to perform a manual failover when there are no healthy nodes, but at the same time it will not allow you to schedule a switchover.

2. The switchover endpoint is the opposite. It works only when the cluster is healthy (there is a leader) and allows you to schedule a switchover at a given time.

In the JSON body of the ``POST`` request you must specify at least the ``leader`` or ``candidate`` fields and optionally the ``scheduled_at`` field if you want to schedule a switchover at a specific time.

Example: perform a failover to a specific node:

.. code-block:: bash

    $ curl -s http://localhost:8009/failover -XPOST -d '{"candidate":"postgresql1"}'
    Successfully failed over to "postgresql1"

Example: schedule a switchover from the leader to any other healthy replica in the cluster at a specific time:

.. code-block:: bash

    $ curl -s http://localhost:8008/switchover -XPOST -d \
        '{"leader":"postgresql0","scheduled_at":"2019-09-24T12:00+00"}'
    Switchover scheduled

Depending on the situation the request might finish with a different HTTP status code and body. The status code **200** is returned when the switchover or failover has successfully completed. If the switchover was successfully scheduled, Patroni will return HTTP status code **202**. If something went wrong, an error status code (one of **400**, **412** or **503**) will be returned with some details in the response body. For more information please check the source code of the ``patroni/api.py:do_POST_failover()`` method.

The switchover and failover endpoints are used by ``patronictl switchover`` and ``patronictl failover``, respectively.

Restart endpoint
----------------

- ``POST /restart``: You can restart Postgres on a specific node by performing the ``POST /restart`` call. In the JSON body of the ``POST`` request it is possible to optionally specify some restart conditions:

  - **restart_pending**: boolean, if set to ``true`` Patroni will restart PostgreSQL only when a restart is pending in order to apply some changes in the PostgreSQL config.

  - **role**: perform the restart only if the current role of the node matches the role from the POST request.

  - **postgres_version**: perform the restart only if the current version of postgres is lower than the one specified in the POST request.

  - **timeout**: how long to wait before PostgreSQL starts accepting connections. Overrides ``master_start_timeout``.

  - **schedule**: timestamp with time zone, schedule the restart somewhere in the future.

- ``DELETE /restart``: delete the scheduled restart

The ``POST /restart`` and ``DELETE /restart`` endpoints are used by ``patronictl restart`` and ``patronictl flush`` respectively.

Reload endpoint
---------------

The ``POST /reload`` call will order Patroni to re-read and apply the configuration file. This is the equivalent of sending the ``SIGHUP`` signal to the Patroni process.
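For completeness, here is a small sketch of driving the restart and reload endpoints from Python with the ``requests`` library. The host, port and the chosen restart conditions are placeholders; the endpoint semantics are those described above.

.. code-block:: python

    # Illustrative only: host, port and conditions are placeholders.
    import requests

    base = "http://localhost:8008"

    # Restart PostgreSQL only if a restart is pending and only if the
    # node is currently a replica; Patroni answers with an HTTP status
    # code and a short message.
    r = requests.post(base + "/restart",
                      json={"restart_pending": True, "role": "replica"})
    print(r.status_code, r.text)

    # Ask Patroni to re-read and apply its configuration file (the same
    # effect as sending SIGHUP to the Patroni process).
    r = requests.post(base + "/reload")
    print(r.status_code, r.text)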
Note that if you changed some of the Postgres parameters which require a restart (like **shared_buffers**), you still have to explicitly restart Postgres, either by calling the ``POST /restart`` endpoint or with the help of ``patronictl restart``.

The reload endpoint is used by ``patronictl reload``.

Reinitialize endpoint
---------------------

``POST /reinitialize``: reinitialize the PostgreSQL data directory on the specified node. It may only be executed on replicas. Once called, it will remove the data directory and start ``pg_basebackup`` or some alternative :ref:`replica creation method `.

The call might fail if Patroni is in a loop trying to recover (restart) a failed Postgres. In order to overcome this problem one can specify ``{"force":true}`` in the request body.

The reinitialize endpoint is used by ``patronictl reinit``.

patroni-1.6.4/docs/watchdog.rst000066400000000000000000000076241361356115100164710ustar00rootroot00000000000000
.. _watchdog:

Watchdog support
================

Having multiple PostgreSQL servers running as master can result in transactions lost due to diverging timelines. This situation is also called a split-brain problem. To avoid split-brain Patroni needs to ensure PostgreSQL will not accept any transaction commits after the leader key expires in the DCS. Under normal circumstances Patroni will try to achieve this by stopping PostgreSQL when the leader lock update fails for any reason. However, this may fail to happen due to various reasons:

- Patroni has crashed due to a bug, an out-of-memory condition or by being accidentally killed by a system administrator.

- Shutting down PostgreSQL is too slow.

- Patroni does not get to run due to high load on the system, the VM being paused by the hypervisor, or other infrastructure issues.

To guarantee correct behavior under these conditions Patroni supports watchdog devices. Watchdog devices are software or hardware mechanisms that will reset the whole system when they do not get a keepalive heartbeat within a specified timeframe. This adds an additional layer of safety in case the usual Patroni split-brain protection mechanisms fail.

Patroni will try to activate the watchdog before promoting PostgreSQL to master. If watchdog activation fails and watchdog mode is ``required`` then the node will refuse to become master. When deciding to participate in leader election Patroni will also check that the watchdog configuration will allow it to become leader at all. After demoting PostgreSQL (for example due to a manual failover) Patroni will disable the watchdog again. The watchdog will also be disabled while Patroni is in paused state.

By default Patroni will set up the watchdog to expire 5 seconds before TTL expires. With the default setup of ``loop_wait=10`` and ``ttl=30`` this gives the HA loop at least 15 seconds (``ttl`` - ``safety_margin`` - ``loop_wait``) to complete before the system gets forcefully reset. By default accessing DCS is configured to time out after 10 seconds. This means that when DCS is unavailable, for example due to network issues, Patroni and PostgreSQL will have at least 5 seconds (``ttl`` - ``safety_margin`` - ``loop_wait`` - ``retry_timeout``) to come to a state where all client connections are terminated.

The safety margin is the amount of time that Patroni reserves between the leader key update and the watchdog keepalive. Patroni will try to send a keepalive immediately after confirmation of the leader key update.
If the Patroni process is suspended for an extended amount of time at exactly the right moment, the keepalive may be delayed for more than the safety margin without triggering the watchdog. This results in a window of time where the watchdog will not trigger before leader key expiration, invalidating the guarantee. To be absolutely sure that the watchdog will trigger under all circumstances, set up the watchdog to expire after half of the TTL by setting ``safety_margin`` to -1, which sets the watchdog timeout to ``ttl // 2``. If you need this guarantee you probably should increase ``ttl`` and/or reduce ``loop_wait`` and ``retry_timeout``.

Currently watchdogs are only supported using the Linux watchdog device interface.

Setting up software watchdog on Linux
-------------------------------------

The default Patroni configuration will try to use ``/dev/watchdog`` on Linux if it is accessible to Patroni. For most use cases using the software watchdog built into the Linux kernel is secure enough.

To enable the software watchdog issue the following commands as root before starting Patroni:

.. code-block:: bash

    modprobe softdog
    # Replace postgres with the user you will be running patroni under
    chown postgres /dev/watchdog

For testing it may be helpful to disable rebooting by adding ``soft_noboot=1`` to the modprobe command line. In this case the watchdog will just log a line in the kernel ring buffer, visible via `dmesg`.

Patroni will log information about the watchdog when it is successfully enabled.

patroni-1.6.4/extras/000077500000000000000000000000001361356115100145065ustar00rootroot00000000000000patroni-1.6.4/extras/README.md000066400000000000000000000012271361356115100157670ustar00rootroot00000000000000
### confd

The `confd` directory contains haproxy and pgbouncer template files for [confd](https://github.com/kelseyhightower/confd), a lightweight configuration management tool.

You need to copy the content of the `confd` directory into /etc/confd and run the confd service:

```bash
$ confd -prefix=/service/$PATRONI_SCOPE -backend etcd -node $PATRONI_ETCD_URL -interval=10
```

It will periodically update haproxy.cfg and pgbouncer.ini with the actual list of Patroni nodes from `etcd` and "reload" haproxy and pgbouncer when necessary.

### startup-scripts

The `startup-scripts` directory contains startup scripts for various OSes and management tools for Patroni.
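The haproxy template in the files that follow routes traffic purely off Patroni's HTTP health checks (it sends `OPTIONS /master` and `OPTIONS /replica` requests). A quick, illustrative way to see what the load balancer would see, with the ports below as placeholders for your own Patroni REST API ports:

```python
# Probe the health-check endpoints a load balancer relies on: a node is
# routed as "master" or "replica" based purely on the HTTP status code.
import requests

for port in (8008, 8009):
    for endpoint in ("/master", "/replica"):
        url = "http://127.0.0.1:{0}{1}".format(port, endpoint)
        try:
            status = requests.options(url, timeout=2).status_code
        except requests.RequestException as exc:
            status = str(exc)
        print(url, "->", status)
```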
patroni-1.6.4/extras/confd/000077500000000000000000000000001361356115100155775ustar00rootroot00000000000000patroni-1.6.4/extras/confd/conf.d/000077500000000000000000000000001361356115100167465ustar00rootroot00000000000000patroni-1.6.4/extras/confd/conf.d/haproxy.toml000066400000000000000000000004751361356115100213430ustar00rootroot00000000000000[template] #prefix = "/service/batman" #owner = "haproxy" #mode = "0644" src = "haproxy.tmpl" dest = "/etc/haproxy/haproxy.cfg" check_cmd = "/usr/sbin/haproxy -c -f {{ .src }}" reload_cmd = "haproxy -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid -D -sf $(cat /var/run/haproxy.pid)" keys = [ "/members/", ] patroni-1.6.4/extras/confd/conf.d/pgbouncer.toml000066400000000000000000000003241361356115100216260ustar00rootroot00000000000000[template] prefix = "/service/batman" owner = "postgres" mode = "0644" src = "pgbouncer.tmpl" dest = "/etc/pgbouncer/pgbouncer.ini" reload_cmd = "systemctl reload pgbouncer" keys = [ "/members/","/leader" ]patroni-1.6.4/extras/confd/templates/000077500000000000000000000000001361356115100175755ustar00rootroot00000000000000patroni-1.6.4/extras/confd/templates/haproxy.tmpl000066400000000000000000000017421361356115100221710ustar00rootroot00000000000000global maxconn 100 defaults log global mode tcp retries 2 timeout client 30m timeout connect 4s timeout server 30m timeout check 5s listen stats mode http bind *:7000 stats enable stats uri / listen master bind *:5000 option httpchk OPTIONS /master http-check expect status 200 default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions {{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} maxconn 100 check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} {{end}} listen replicas bind *:5001 option httpchk OPTIONS /replica http-check expect status 200 default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions {{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} maxconn 100 check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} {{end}} patroni-1.6.4/extras/confd/templates/pgbouncer.tmpl000066400000000000000000000014071361356115100224610ustar00rootroot00000000000000[databases] {{with get "/leader"}}{{$leader := .Value}}{{$leadkey := printf "/members/%s" $leader}}{{with get $leadkey}}{{$data := json .Value}}{{$hostport := base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}}{{ $host := base (index (split $hostport ":") 0)}}{{ $port := base (index (split $hostport ":") 1)}}* = host={{ $host }} port={{ $port }} pool_size=10{{end}}{{end}} [pgbouncer] logfile = /var/log/postgresql/pgbouncer.log pidfile = /var/run/postgresql/pgbouncer.pid listen_addr = * listen_port = 6432 unix_socket_dir = /var/run/postgresql auth_type = trust auth_file = /etc/pgbouncer/userlist.txt auth_hba_file = /etc/pgbouncer/pg_hba.txt admin_users = pgbouncer stats_users = pgbouncer pool_mode = session max_client_conn = 100 default_pool_size = 20 patroni-1.6.4/extras/startup-scripts/000077500000000000000000000000001361356115100176755ustar00rootroot00000000000000patroni-1.6.4/extras/startup-scripts/README.md000066400000000000000000000021461361356115100211570ustar00rootroot00000000000000# startup scripts for Patroni This directory contains sample startup scripts for various OSes and management tools for Patroni. 
Scripts supplied:

### patroni.upstart.conf

Upstart job for Ubuntu 12.04 or 14.04. Requires Upstart > 1.4. Intended for systems where Patroni has been installed on a base system, rather than in Docker.

### patroni.service

Systemd service file, to be copied to /etc/systemd/system/patroni.service, tested on CentOS 7.1 with Patroni installed from pip.

### patroni

Init.d service file for Debian-like distributions. Copy it to /etc/init.d/, make it executable: ```chmod 755 /etc/init.d/patroni``` and run it with ```service patroni start```, or make it start on boot with ```update-rc.d patroni defaults```.

You might also want to edit some configuration variables in it:

* PATRONI for the patroni.py location
* CONF for the configuration file
* LOGFILE for the log file (the script creates it if it does not exist)

Note: if you have several versions of Postgres installed, set POSTGRES_VERSION to the release number you wish to run. The script uses this value to extend PATH with the correct path to the Postgres binaries.

patroni-1.6.4/extras/startup-scripts/patroni000066400000000000000000000065311361356115100213010ustar00rootroot00000000000000#!/bin/sh # ### BEGIN INIT INFO # Provides: patroni # Required-Start: $remote_fs $syslog # Required-Stop: $remote_fs $syslog # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Patroni init script # Description: Runners to orchestrate a high-availability PostgreSQL ### END INIT INFO ### BEGIN USER CONFIGURATION CONF="/etc/patroni/postgres.yml" LOGFILE="/var/log/patroni.log" USER="postgres" GROUP="postgres" NAME=patroni PATRONI="/opt/patroni/$NAME.py" PIDFILE="/var/run/$NAME.pid" # Set this parameter, if you have several Postgres versions installed # POSTGRES_VERSION="9.4" POSTGRES_VERSION="" ### END USER CONFIGURATION . /lib/lsb/init-functions # Loading this library for get_versions() function if test ! -e /usr/share/postgresql-common/init.d-functions; then log_failure_msg "Probably postgresql-common does not installed." exit 1 else . /usr/share/postgresql-common/init.d-functions fi # Is there Patroni executable? if test ! -e $PATRONI; then log_failure_msg "Patroni executable $PATRONI does not exist." exit 1 fi # Is there Patroni configuration file? if test ! -e $CONF; then log_failure_msg "Patroni configuration file $CONF does not exist." exit 1 fi # Create logfile if doesn't exist if test ! -e $LOGFILE; then log_action_msg "Creating logfile for Patroni..." touch $LOGFILE chown $USER:$GROUP $LOGFILE fi prepare_pgpath() { if [ "$POSTGRES_VERSION" != "" ]; then if [ -x /usr/lib/postgresql/$POSTGRES_VERSION/bin/pg_ctl ]; then PGPATH="/usr/lib/postgresql/$POSTGRES_VERSION/bin" else log_failure_msg "Postgres version incorrect, check POSTGRES_VERSION variable." exit 0 fi else get_versions if echo $versions | grep -q -e "\s"; then log_warning_msg "You have several Postgres versions installed. Please, use POSTGRES_VERSION to define correct environment." else versions=`echo $versions | sed -e 's/^[ \t]*//'` PGPATH="/usr/lib/postgresql/$versions/bin" fi fi } get_pid() { if test -e $PIDFILE; then PID=`cat $PIDFILE` CHILDPID=`ps --ppid $PID -o %p --no-headers` else log_failure_msg "Could not find PID file. Patroni probably down."
exit 1 fi } case "$1" in start) prepare_pgpath PGPATH=$PATH:$PGPATH log_success_msg "Starting Patroni\n" exec start-stop-daemon --start --quiet \ --background \ --pidfile $PIDFILE --make-pidfile \ --chuid $USER:$GROUP \ --chdir `eval echo ~$USER` \ --exec $PATRONI \ --startas /bin/sh -- \ -c "/usr/bin/env PATH=$PGPATH /usr/bin/python $PATRONI $CONF >> $LOGFILE 2>&1" ;; stop) log_success_msg "Stopping Patroni" get_pid start-stop-daemon --stop --pid $CHILDPID start-stop-daemon --stop --pidfile $PIDFILE --remove-pidfile --quiet ;; reload) log_success_msg "Reloading Patroni configuration" get_pid kill -HUP $CHILDPID ;; status) get_pid if start-stop-daemon -T --pid $CHILDPID; then log_success_msg "Patroni is running\n" exit 0 else log_warning_msg "Patroni in not running\n" fi ;; restart) $0 stop $0 start ;; *) echo "Usage: /etc/init.d/$NAME {start|stop|restart|reload|status}" exit 1 ;; esac if [ $? -eq 0 ]; then echo . exit 0 else echo " failed" exit 1 fi patroni-1.6.4/extras/startup-scripts/patroni.service000066400000000000000000000023211361356115100227310ustar00rootroot00000000000000# This is an example systemd config file for Patroni # You can copy it to "/etc/systemd/system/patroni.service", [Unit] Description=Runners to orchestrate a high-availability PostgreSQL After=syslog.target network.target [Service] Type=simple User=postgres Group=postgres # Read in configuration file if it exists, otherwise proceed EnvironmentFile=-/etc/patroni_env.conf WorkingDirectory=~ # Where to send early-startup messages from the server # This is normally controlled by the global default set by systemd #StandardOutput=syslog # Pre-commands to start watchdog device # Uncomment if watchdog is part of your patroni setup #ExecStartPre=-/usr/bin/sudo /sbin/modprobe softdog #ExecStartPre=-/usr/bin/sudo /bin/chown postgres /dev/watchdog # Start the patroni process ExecStart=/bin/patroni /etc/patroni.yml # Send HUP to reload from patroni.yml ExecReload=/bin/kill -s HUP $MAINPID # only kill the patroni process, not it's children, so it will gracefully stop postgres KillMode=process # Give a reasonable amount of time for the server to start up/shut down TimeoutSec=30 # Do not restart the service if it crashes, we want to manually inspect database on failure Restart=no [Install] WantedBy=multi-user.target patroni-1.6.4/extras/startup-scripts/patroni.upstart.conf000066400000000000000000000014641361356115100237260ustar00rootroot00000000000000# patroni - patroni daemon # # controls startup/shutdown of postgres # you should disable any postgres start jobs # # assumes that patroni has been installed into the # pythonpath by using setup.py install description "patroni start daemon" start on net-device-up stop on runlevel [06] respawn respawn limit 5 10 # set location of patroni env PATRONI=/usr/local/bin/patroni # virtualenv example # env PATRONI=/var/lib/postgresql/patronienv/bin/patroni # set location of config file env PATRONICONF=/etc/patroni/patroni.yml # set log dir for patroni logs # postgres user must have write permission env POSTGRESLOGDIR=/var/log/postgresql setuid postgres setgid postgres script exec start-stop-daemon --start \ --exec $PATRONI -- $PATRONICONF \ >> $POSTGRESLOGDIR/patroni.log 2>&1 end script patroni-1.6.4/features/000077500000000000000000000000001361356115100150165ustar00rootroot00000000000000patroni-1.6.4/features/archive-restore.py000066400000000000000000000013501361356115100204710ustar00rootroot00000000000000#!/usr/bin/env python import os import argparse import shutil if __name__ == "__main__": 
parser = argparse.ArgumentParser() parser.add_argument("--dirname", required=True) parser.add_argument("--pathname", required=True) parser.add_argument("--filename", required=True) parser.add_argument("--mode", required=True, choices=("archive", "restore")) args, _ = parser.parse_known_args() full_filename = os.path.join(args.dirname, args.filename) if args.mode == "archive": if not os.path.isdir(args.dirname): os.makedirs(args.dirname) if not os.path.exists(full_filename): shutil.copy(args.pathname, full_filename) else: shutil.copy(full_filename, args.pathname) patroni-1.6.4/features/backup_create.py000077500000000000000000000010561361356115100201650ustar00rootroot00000000000000#!/usr/bin/env python import argparse import subprocess import sys if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--datadir", required=True) parser.add_argument("--dbname", required=True) parser.add_argument("--walmethod", required=True, choices=("fetch", "stream", "none")) args, _ = parser.parse_known_args() walmethod = ["-X", args.walmethod] if args.walmethod != "none" else [] sys.exit(subprocess.call(["pg_basebackup", "-D", args.datadir, "-c", "fast", "-d", args.dbname] + walmethod)) patroni-1.6.4/features/backup_restore.py000077500000000000000000000004741361356115100204100ustar00rootroot00000000000000#!/usr/bin/env python import argparse import shutil if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--datadir", required=True) parser.add_argument("--sourcedir", required=True) args, _ = parser.parse_known_args() shutil.copytree(args.sourcedir, args.datadir) patroni-1.6.4/features/basic_replication.feature000066400000000000000000000052161361356115100220510ustar00rootroot00000000000000Feature: basic replication We should check that the basic bootstrapping, replication and failover works. 
Scenario: check replication of a single table Given I start postgres0 Then postgres0 is a leader after 10 seconds And there is a non empty initialize key in DCS after 15 seconds When I issue a PATCH request to http://127.0.0.1:8008/config with {"ttl": 20, "loop_wait": 2, "synchronous_mode": true} Then I receive a response code 200 When I start postgres1 And I configure and start postgres2 with a tag replicatefrom postgres0 And "sync" key in DCS has leader=postgres0 after 20 seconds And I add the table foo to postgres0 Then table foo is present on postgres1 after 20 seconds Then table foo is present on postgres2 after 20 seconds Scenario: check restart of sync replica Given I shut down postgres2 Then "sync" key in DCS has sync_standby=postgres1 after 5 seconds When I start postgres2 And I shut down postgres1 Then "sync" key in DCS has sync_standby=postgres2 after 10 seconds When I start postgres1 And "members/postgres1" key in DCS has state=running after 10 seconds And I sleep for 2 seconds When I issue a GET request to http://127.0.0.1:8010/sync Then I receive a response code 200 When I issue a GET request to http://127.0.0.1:8009/async Then I receive a response code 200 Scenario: check the basic failover in synchronous mode Given I run patronictl.py pause batman Then I receive a response returncode 0 When I sleep for 2 seconds And I shut down postgres0 And I run patronictl.py resume batman Then I receive a response returncode 0 And postgres2 role is the primary after 24 seconds And Response on GET http://127.0.0.1:8010/history contains recovery after 10 seconds When I issue a PATCH request to http://127.0.0.1:8010/config with {"synchronous_mode": null, "master_start_timeout": 0} Then I receive a response code 200 When I add the table bar to postgres2 Then table bar is present on postgres1 after 20 seconds And Response on GET http://127.0.0.1:8010/config contains master_start_timeout after 10 seconds Scenario: check immediate failover when master_start_timeout=0 Given I kill postmaster on postgres2 Then postgres1 is a leader after 10 seconds And postgres1 role is the primary after 10 seconds Scenario: check rejoin of the former master with pg_rewind Given I add the table splitbrain to postgres0 And I start postgres0 Then postgres0 role is the secondary after 20 seconds When I add the table buz to postgres1 Then table buz is present on postgres0 after 20 seconds patroni-1.6.4/features/callback.py000077500000000000000000000010251361356115100171250ustar00rootroot00000000000000#!/usr/bin/env python import os import psycopg2 import sys if __name__ == '__main__': if not (len(sys.argv) >= 3 and sys.argv[3] == "master"): sys.exit(1) os.environ['PGPASSWORD'] = 'zalando' connection = psycopg2.connect(host='127.0.0.1', port=sys.argv[1], user='postgres') cursor = connection.cursor() cursor.execute("SELECT slot_name FROM pg_replication_slots WHERE slot_type = 'logical'") with open("data/postgres0/label", "w") as label: label.write(next(iter(cursor.fetchone()), "")) patroni-1.6.4/features/callback2.py000077500000000000000000000002231361356115100172060ustar00rootroot00000000000000#!/usr/bin/env python import sys with open("data/{0}/{0}_cb.log".format(sys.argv[1]), "a+") as log: log.write(" ".join(sys.argv[-3:]) + "\n") patroni-1.6.4/features/cascading_replication.feature000066400000000000000000000014301361356115100226760ustar00rootroot00000000000000Feature: cascading replication We should check that patroni can do base backup and streaming from the replica Scenario: check a base backup and streaming 
replication from a replica Given I start postgres0 And postgres0 is a leader after 10 seconds And I configure and start postgres1 with a tag clonefrom true And replication works from postgres0 to postgres1 after 20 seconds And I create label with "postgres0" in postgres0 data directory And I create label with "postgres1" in postgres1 data directory And "members/postgres1" key in DCS has state=running after 12 seconds And I configure and start postgres2 with a tag replicatefrom postgres1 Then replication works from postgres0 to postgres2 after 30 seconds And there is a label with "postgres1" in postgres2 data directory patroni-1.6.4/features/custom_bootstrap.feature000066400000000000000000000014141361356115100220020ustar00rootroot00000000000000Feature: custom bootstrap We should check that patroni can bootstrap a new cluster from a backup Scenario: clone existing cluster using pg_basebackup Given I start postgres0 Then postgres0 is a leader after 10 seconds When I add the table foo to postgres0 And I start postgres1 in a cluster batman1 as a clone of postgres0 Then postgres1 is a leader of batman1 after 10 seconds Then table foo is present on postgres1 after 10 seconds Scenario: make a backup and do a restore into a new cluster Given I add the table bar to postgres1 And I do a backup of postgres1 When I start postgres2 in a cluster batman2 from backup Then postgres2 is a leader of batman2 after 30 seconds And table bar is present on postgres2 after 10 seconds patroni-1.6.4/features/environment.py000066400000000000000000000731761361356115100177520ustar00rootroot00000000000000import abc import datetime import os import psycopg2 import json import shutil import signal import six import subprocess import sys import tempfile import threading import time import yaml @six.add_metaclass(abc.ABCMeta) class AbstractController(object): def __init__(self, context, name, work_directory, output_dir): self._context = context self._name = name self._work_directory = work_directory self._output_dir = output_dir self._handle = None self._log = None def _has_started(self): return self._handle and self._handle.pid and self._handle.poll() is None def _is_running(self): return self._has_started() @abc.abstractmethod def _is_accessible(self): """process is accessible for queries""" @abc.abstractmethod def _start(self): """start process""" def start(self, max_wait_limit=5): if self._is_running(): return True self._log = open(os.path.join(self._output_dir, self._name + '.log'), 'a') self._handle = self._start() assert self._has_started(), "Process {0} is not running after being started".format(self._name) max_wait_limit *= self._context.timeout_multiplier for _ in range(max_wait_limit): if self._is_accessible(): break time.sleep(1) else: assert False,\ "{0} instance is not available for queries after {1} seconds".format(self._name, max_wait_limit) def stop(self, kill=False, timeout=15, _=False): term = False start_time = time.time() timeout *= self._context.timeout_multiplier while self._handle and self._is_running(): if kill: self._handle.kill() elif not term: self._handle.terminate() term = True time.sleep(1) if not kill and time.time() - start_time > timeout: kill = True if self._log: self._log.close() def cancel_background(self): pass class PatroniController(AbstractController): __PORT = 5360 PATRONI_CONFIG = '{}.yml' """ starts and stops individual patronis""" def __init__(self, context, name, work_directory, output_dir, custom_config=None): super(PatroniController, self).__init__(context, 'patroni_' + name, 
work_directory, output_dir) PatroniController.__PORT += 1 self._data_dir = os.path.join(work_directory, 'data', name) self._connstring = None if custom_config and 'watchdog' in custom_config: self.watchdog = WatchdogMonitor(name, work_directory, output_dir) custom_config['watchdog'] = {'driver': 'testing', 'device': self.watchdog.fifo_path, 'mode': 'required'} else: self.watchdog = None self._scope = (custom_config or {}).get('scope', 'batman') self._config = self._make_patroni_test_config(name, custom_config) self._closables = [] self._conn = None self._curs = None def write_label(self, content): with open(os.path.join(self._data_dir, 'label'), 'w') as f: f.write(content) def read_label(self, label): try: with open(os.path.join(self._data_dir, label), 'r') as f: return f.read().strip() except IOError: return None @staticmethod def recursive_update(dst, src): for k, v in src.items(): if k in dst and isinstance(dst[k], dict): PatroniController.recursive_update(dst[k], v) else: dst[k] = v def update_config(self, custom_config): with open(self._config) as r: config = yaml.safe_load(r) self.recursive_update(config, custom_config) with open(self._config, 'w') as w: yaml.safe_dump(config, w, default_flow_style=False) self._scope = config.get('scope', 'batman') def add_tag_to_config(self, tag, value): self.update_config({'tags': {tag: value}}) def _start(self): if self.watchdog: self.watchdog.start() if isinstance(self._context.dcs_ctl, KubernetesController): self._context.dcs_ctl.create_pod(self._name[8:], self._scope) os.environ['PATRONI_KUBERNETES_POD_IP'] = '10.0.0.' + self._name[-1] return subprocess.Popen([sys.executable, '-m', 'coverage', 'run', '--source=patroni', '-p', 'patroni.py', self._config], stdout=self._log, stderr=subprocess.STDOUT, cwd=self._work_directory) def stop(self, kill=False, timeout=15, postgres=False): if postgres: return subprocess.call(['pg_ctl', '-D', self._data_dir, 'stop', '-mi', '-w']) super(PatroniController, self).stop(kill, timeout) if isinstance(self._context.dcs_ctl, KubernetesController): self._context.dcs_ctl.delete_pod(self._name[8:]) if self.watchdog: self.watchdog.stop() def _is_accessible(self): cursor = self.query("SELECT 1", fail_ok=True) if cursor is not None: cursor.execute("SET synchronous_commit TO 'local'") return True def _make_patroni_test_config(self, name, custom_config): patroni_config_name = self.PATRONI_CONFIG.format(name) patroni_config_path = os.path.join(self._output_dir, patroni_config_name) with open(patroni_config_name) as f: config = yaml.safe_load(f) config.pop('etcd', None) host = config['postgresql']['listen'].split(':')[0] config['postgresql']['listen'] = config['postgresql']['connect_address'] = '{0}:{1}'.format(host, self.__PORT) config['name'] = name config['postgresql']['data_dir'] = self._data_dir config['postgresql']['use_unix_socket'] = os.name != 'nt' # windows doesn't yet support unix-domain sockets config['postgresql']['pgpass'] = os.path.join(tempfile.gettempdir(), 'pgpass_' + name) config['postgresql']['parameters'].update({ 'logging_collector': 'on', 'log_destination': 'csvlog', 'log_directory': self._output_dir, 'log_filename': name + '.log', 'log_statement': 'all', 'log_min_messages': 'debug1', 'unix_socket_directories': self._data_dir}) if 'bootstrap' in config: config['bootstrap']['post_bootstrap'] = 'psql -w -c "SELECT 1"' if 'initdb' in config['bootstrap']: config['bootstrap']['initdb'].extend([{'auth': 'md5'}, {'auth-host': 'md5'}]) if custom_config is not None: self.recursive_update(config, custom_config) 
if config['postgresql'].get('callbacks', {}).get('on_role_change'): config['postgresql']['callbacks']['on_role_change'] += ' ' + str(self.__PORT) with open(patroni_config_path, 'w') as f: yaml.safe_dump(config, f, default_flow_style=False) user = config['postgresql'].get('authentication', config['postgresql']).get('superuser', {}) self._connkwargs = {k: user[n] for n, k in [('username', 'user'), ('password', 'password')] if n in user} self._connkwargs.update({'host': host, 'port': self.__PORT, 'database': 'postgres'}) self._replication = config['postgresql'].get('authentication', config['postgresql']).get('replication', {}) self._replication.update({'host': host, 'port': self.__PORT, 'database': 'postgres'}) return patroni_config_path def _connection(self): if not self._conn or self._conn.closed != 0: self._conn = psycopg2.connect(**self._connkwargs) self._conn.autocommit = True return self._conn def _cursor(self): if not self._curs or self._curs.closed or self._curs.connection.closed != 0: self._curs = self._connection().cursor() return self._curs def query(self, query, fail_ok=False): try: cursor = self._cursor() cursor.execute(query) return cursor except psycopg2.Error: if not fail_ok: raise def check_role_has_changed_to(self, new_role, timeout=10): bound_time = time.time() + timeout recovery_status = new_role != 'primary' while time.time() < bound_time: cur = self.query("SELECT pg_is_in_recovery()", fail_ok=True) if cur: row = cur.fetchone() if row and row[0] == recovery_status: return True time.sleep(1) return False def get_watchdog(self): return self.watchdog def _get_pid(self): try: pidfile = os.path.join(self._data_dir, 'postmaster.pid') if not os.path.exists(pidfile): return None return int(open(pidfile).readline().strip()) except Exception: return None def patroni_hang(self, timeout): hang = ProcessHang(self._handle.pid, timeout) self._closables.append(hang) hang.start() def cancel_background(self): for obj in self._closables: obj.close() self._closables = [] @property def backup_source(self): return 'postgres://{username}:{password}@{host}:{port}/{database}'.format(**self._replication) def backup(self, dest=os.path.join('data', 'basebackup')): subprocess.call(PatroniPoolController.BACKUP_SCRIPT + ['--walmethod=none', '--datadir=' + os.path.join(self._work_directory, dest), '--dbname=' + self.backup_source]) class ProcessHang(object): """A background thread implementing a cancelable process hang via SIGSTOP.""" def __init__(self, pid, timeout): self._cancelled = threading.Event() self._thread = threading.Thread(target=self.run) self.pid = pid self.timeout = timeout def start(self): self._thread.start() def run(self): os.kill(self.pid, signal.SIGSTOP) try: self._cancelled.wait(self.timeout) finally: os.kill(self.pid, signal.SIGCONT) def close(self): self._cancelled.set() self._thread.join() class AbstractDcsController(AbstractController): _CLUSTER_NODE = '/service/{0}' def __init__(self, context, mktemp=True): work_directory = mktemp and tempfile.mkdtemp() or None super(AbstractDcsController, self).__init__(context, self.name(), work_directory, context.pctl.output_dir) def _is_accessible(self): return self._is_running() def stop(self, kill=False, timeout=15): """ terminate process and wipe out the temp work directory, but only if we actually started it""" super(AbstractDcsController, self).stop(kill=kill, timeout=timeout) if self._work_directory: shutil.rmtree(self._work_directory) def path(self, key=None, scope='batman'): return self._CLUSTER_NODE.format(scope) + (key and '/' + 
key or '') @abc.abstractmethod def query(self, key, scope='batman'): """ query for a value of a given key """ @abc.abstractmethod def cleanup_service_tree(self): """ clean all contents stored in the tree used for the tests """ @classmethod def get_subclasses(cls): for subclass in cls.__subclasses__(): for subsubclass in subclass.get_subclasses(): yield subsubclass yield subclass @classmethod def name(cls): return cls.__name__[:-10].lower() class ConsulController(AbstractDcsController): def __init__(self, context): super(ConsulController, self).__init__(context) os.environ['PATRONI_CONSUL_HOST'] = 'localhost:8500' os.environ['PATRONI_CONSUL_REGISTER_SERVICE'] = 'on' self._config_file = None import consul self._client = consul.Consul() def _start(self): self._config_file = self._work_directory + '.json' with open(self._config_file, 'wb') as f: f.write(b'{"session_ttl_min":"5s","server":true,"bootstrap":true,"advertise_addr":"127.0.0.1"}') return subprocess.Popen(['consul', 'agent', '-config-file', self._config_file, '-data-dir', self._work_directory], stdout=self._log, stderr=subprocess.STDOUT) def stop(self, kill=False, timeout=15): super(ConsulController, self).stop(kill=kill, timeout=timeout) if self._config_file: os.unlink(self._config_file) def _is_running(self): try: return bool(self._client.status.leader()) except Exception: return False def path(self, key=None, scope='batman'): return super(ConsulController, self).path(key, scope)[1:] def query(self, key, scope='batman'): _, value = self._client.kv.get(self.path(key, scope)) return value and value['Value'].decode('utf-8') def cleanup_service_tree(self): self._client.kv.delete(self.path(scope=''), recurse=True) def start(self, max_wait_limit=15): super(ConsulController, self).start(max_wait_limit) class EtcdController(AbstractDcsController): """ handles all etcd related tasks, used for the tests setup and cleanup """ def __init__(self, context): super(EtcdController, self).__init__(context) os.environ['PATRONI_ETCD_HOST'] = 'localhost:2379' import etcd self._client = etcd.Client(port=2379) def _start(self): return subprocess.Popen(["etcd", "--debug", "--data-dir", self._work_directory], stdout=self._log, stderr=subprocess.STDOUT) def query(self, key, scope='batman'): import etcd try: return self._client.get(self.path(key, scope)).value except etcd.EtcdKeyNotFound: return None def cleanup_service_tree(self): import etcd try: self._client.delete(self.path(scope=''), recursive=True) except (etcd.EtcdKeyNotFound, etcd.EtcdConnectionFailed): return except Exception as e: assert False, "exception when cleaning up etcd contents: {0}".format(e) def _is_running(self): # if etcd is running, but we didn't start it try: return bool(self._client.machines) except Exception: return False class KubernetesController(AbstractDcsController): def __init__(self, context): super(KubernetesController, self).__init__(context) self._namespace = 'default' self._labels = {"application": "patroni"} self._label_selector = ','.join('{0}={1}'.format(k, v) for k, v in self._labels.items()) os.environ['PATRONI_KUBERNETES_LABELS'] = json.dumps(self._labels) os.environ['PATRONI_KUBERNETES_USE_ENDPOINTS'] = 'true' from kubernetes import client as k8s_client, config as k8s_config k8s_config.load_kube_config(context='local') self._client = k8s_client self._api = self._client.CoreV1Api() def _start(self): pass def create_pod(self, name, scope): labels = self._labels.copy() labels['cluster-name'] = scope metadata = self._client.V1ObjectMeta(namespace=self._namespace, 
name=name, labels=labels) spec = self._client.V1PodSpec(containers=[self._client.V1Container(name=name, image='empty')]) body = self._client.V1Pod(metadata=metadata, spec=spec) self._api.create_namespaced_pod(self._namespace, body) def delete_pod(self, name): try: self._api.delete_namespaced_pod(name, self._namespace, body=self._client.V1DeleteOptions()) except Exception: pass while True: try: self._api.read_namespaced_pod(name, self._namespace) except Exception: break def query(self, key, scope='batman'): if key.startswith('members/'): pod = self._api.read_namespaced_pod(key[8:], self._namespace) return (pod.metadata.annotations or {}).get('status', '') else: try: ep = scope + {'leader': '', 'history': '-config', 'initialize': '-config'}.get(key, '-' + key) e = self._api.read_namespaced_endpoints(ep, self._namespace) if key != 'sync': return e.metadata.annotations[key] else: return json.dumps(e.metadata.annotations) except Exception: return None def cleanup_service_tree(self): try: self._api.delete_collection_namespaced_pod(self._namespace, label_selector=self._label_selector) except Exception: pass try: self._api.delete_collection_namespaced_endpoints(self._namespace, label_selector=self._label_selector) except Exception: pass while True: result = self._api.list_namespaced_pod(self._namespace, label_selector=self._label_selector) if len(result.items) < 1: break def _is_running(self): return True class ZooKeeperController(AbstractDcsController): """ handles all zookeeper related tasks, used for the tests setup and cleanup """ def __init__(self, context, export_env=True): super(ZooKeeperController, self).__init__(context, False) if export_env: os.environ['PATRONI_ZOOKEEPER_HOSTS'] = "'localhost:2181'" import kazoo.client self._client = kazoo.client.KazooClient() def _start(self): pass # TODO: implement later def query(self, key, scope='batman'): import kazoo.exceptions try: return self._client.get(self.path(key, scope))[0].decode('utf-8') except kazoo.exceptions.NoNodeError: return None def cleanup_service_tree(self): import kazoo.exceptions try: self._client.delete(self.path(scope=''), recursive=True) except (kazoo.exceptions.NoNodeError): return except Exception as e: assert False, "exception when cleaning up zookeeper contents: {0}".format(e) def _is_running(self): # if zookeeper is running, but we didn't start it if self._client.connected: return True try: return self._client.start(1) or True except Exception: return False class ExhibitorController(ZooKeeperController): def __init__(self, context): super(ExhibitorController, self).__init__(context, False) os.environ.update({'PATRONI_EXHIBITOR_HOSTS': 'localhost', 'PATRONI_EXHIBITOR_PORT': '8181'}) class PatroniPoolController(object): BACKUP_SCRIPT = [sys.executable, 'features/backup_create.py'] ARCHIVE_RESTORE_SCRIPT = ' '.join((sys.executable, os.path.abspath('features/archive-restore.py'))) def __init__(self, context): self._context = context self._dcs = None self._output_dir = None self._patroni_path = None self._processes = {} self.create_and_set_output_directory('') self.known_dcs = {subclass.name(): subclass for subclass in AbstractDcsController.get_subclasses()} @property def patroni_path(self): if self._patroni_path is None: cwd = os.path.realpath(__file__) while True: cwd, entry = os.path.split(cwd) if entry == 'features' or cwd == '/': break self._patroni_path = cwd return self._patroni_path @property def output_dir(self): return self._output_dir def start(self, name, max_wait_limit=20, custom_config=None): if name not in 
self._processes: self._processes[name] = PatroniController(self._context, name, self.patroni_path, self._output_dir, custom_config) self._processes[name].start(max_wait_limit) def __getattr__(self, func): if func not in ['stop', 'query', 'write_label', 'read_label', 'check_role_has_changed_to', 'add_tag_to_config', 'get_watchdog', 'patroni_hang', 'backup']: raise AttributeError("PatroniPoolController instance has no attribute '{0}'".format(func)) def wrapper(name, *args, **kwargs): return getattr(self._processes[name], func)(*args, **kwargs) return wrapper def stop_all(self): for ctl in self._processes.values(): ctl.cancel_background() ctl.stop() self._processes.clear() def create_and_set_output_directory(self, feature_name): feature_dir = os.path.join(self.patroni_path, 'features', 'output', feature_name.replace(' ', '_')) if os.path.exists(feature_dir): shutil.rmtree(feature_dir) os.makedirs(feature_dir) self._output_dir = feature_dir def clone(self, from_name, cluster_name, to_name): f = self._processes[from_name] custom_config = { 'scope': cluster_name, 'bootstrap': { 'method': 'pg_basebackup', 'pg_basebackup': { 'command': " ".join(self.BACKUP_SCRIPT) + ' --walmethod=stream --dbname=' + f.backup_source }, 'dcs': { 'postgresql': { 'parameters': { 'max_connections': 101 } } } }, 'postgresql': { 'parameters': { 'archive_mode': 'on', 'archive_command': (self.ARCHIVE_RESTORE_SCRIPT + ' --mode archive ' + '--dirname {} --filename %f --pathname %p').format( os.path.join(self.patroni_path, 'data', 'wal_archive')) }, 'authentication': { 'superuser': {'password': 'zalando1'}, 'replication': {'password': 'rep-pass1'} } } } self.start(to_name, custom_config=custom_config) def bootstrap_from_backup(self, name, cluster_name): custom_config = { 'scope': cluster_name, 'bootstrap': { 'method': 'backup_restore', 'backup_restore': { 'command': (sys.executable + ' features/backup_restore.py --sourcedir=' + os.path.join(self.patroni_path, 'data', 'basebackup')), 'recovery_conf': { 'recovery_target_action': 'promote', 'recovery_target_timeline': 'latest', 'restore_command': (self.ARCHIVE_RESTORE_SCRIPT + ' --mode restore ' + '--dirname {} --filename %f --pathname %p').format( os.path.join(self.patroni_path, 'data', 'wal_archive')) } } }, 'postgresql': { 'authentication': { 'superuser': {'password': 'zalando2'}, 'replication': {'password': 'rep-pass2'} } } } self.start(name, custom_config=custom_config) @property def dcs(self): if self._dcs is None: self._dcs = os.environ.pop('DCS', 'etcd') assert self._dcs in self.known_dcs, 'Unsupported dcs: ' + self._dcs return self._dcs class WatchdogMonitor(object): """Testing harness for emulating a watchdog device as a named pipe. Because we can't easily emulate ioctl's we require a custom driver on Patroni side. The device takes no action, only notes if it was pinged and/or triggered. 
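The fifo speaks a tiny single-byte protocol (see run() below): b'1' records a keepalive ping, b'V' performs the "magic close", b'X' asks the monitor thread to stop, and b'C' introduces a newline-terminated option string such as timeout=<seconds>; any other byte is logged as an unknown command.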
""" def __init__(self, name, work_directory, output_dir): self.fifo_path = os.path.join(work_directory, 'data', 'watchdog.{0}.fifo'.format(name)) self.fifo_file = None self._stop_requested = False # Relying on bool setting being atomic self._thread = None self.last_ping = None self.was_pinged = False self.was_closed = False self._was_triggered = False self.timeout = 60 self._log_file = open(os.path.join(output_dir, 'watchdog.{0}.log'.format(name)), 'w') self._log("watchdog {0} initialized".format(name)) def _log(self, msg): tstamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f") self._log_file.write("{0}: {1}\n".format(tstamp, msg)) def start(self): assert self._thread is None self._stop_requested = False self._log("starting fifo {0}".format(self.fifo_path)) fifo_dir = os.path.dirname(self.fifo_path) if os.path.exists(self.fifo_path): os.unlink(self.fifo_path) elif not os.path.exists(fifo_dir): os.mkdir(fifo_dir) os.mkfifo(self.fifo_path) self.last_ping = time.time() self._thread = threading.Thread(target=self.run) self._thread.start() def run(self): try: while not self._stop_requested: self._log("opening") self.fifo_file = os.open(self.fifo_path, os.O_RDONLY) try: self._log("Fifo {0} connected".format(self.fifo_path)) self.was_closed = False while not self._stop_requested: c = os.read(self.fifo_file, 1) if c == b'X': self._log("Stop requested") return elif c == b'': self._log("Pipe closed") break elif c == b'C': command = b'' c = os.read(self.fifo_file, 1) while c != b'\n' and c != b'': command += c c = os.read(self.fifo_file, 1) command = command.decode('utf8') if command.startswith('timeout='): self.timeout = int(command.split('=')[1]) self._log("timeout={0}".format(self.timeout)) elif c in [b'V', b'1']: cur_time = time.time() if cur_time - self.last_ping > self.timeout: self._log("Triggered") self._was_triggered = True if c == b'V': self._log("magic close") self.was_closed = True elif c == b'1': self.was_pinged = True self._log("ping after {0} seconds".format(cur_time - (self.last_ping or cur_time))) self.last_ping = cur_time else: self._log('Unknown command {0} received from fifo'.format(c)) finally: self.was_closed = True self._log("closing") os.close(self.fifo_file) except Exception as e: self._log("Error {0}".format(e)) finally: self._log("stopping") self._log_file.flush() if os.path.exists(self.fifo_path): os.unlink(self.fifo_path) def stop(self): self._log("Monitor stop") self._stop_requested = True try: if os.path.exists(self.fifo_path): fd = os.open(self.fifo_path, os.O_WRONLY) os.write(fd, b'X') os.close(fd) except Exception as e: self._log("err while closing: {0}".format(str(e))) if self._thread: self._thread.join() self._thread = None def reset(self): self._log("reset") self.was_pinged = self.was_closed = self._was_triggered = False @property def was_triggered(self): delta = time.time() - self.last_ping triggered = self._was_triggered or not self.was_closed and delta > self.timeout self._log("triggered={0}, {1}s left".format(triggered, self.timeout - delta)) return triggered # actions to execute on start/stop of the tests and before running invidual features def before_all(context): os.environ.update({'PATRONI_RESTAPI_USERNAME': 'username', 'PATRONI_RESTAPI_PASSWORD': 'password'}) context.ci = 'TRAVIS_BUILD_NUMBER' in os.environ or 'BUILD_NUMBER' in os.environ context.timeout_multiplier = 2 if context.ci else 1 context.pctl = PatroniPoolController(context) context.dcs_ctl = context.pctl.known_dcs[context.pctl.dcs](context) context.dcs_ctl.start() try: 
context.dcs_ctl.cleanup_service_tree() except AssertionError: # after_all handlers won't be executed in before_all context.dcs_ctl.stop() raise def after_all(context): context.dcs_ctl.stop() subprocess.call([sys.executable, '-m', 'coverage', 'combine']) subprocess.call([sys.executable, '-m', 'coverage', 'report']) def before_feature(context, feature): """ create per-feature output directory to collect Patroni and PostgreSQL logs """ context.pctl.create_and_set_output_directory(feature.name) def after_feature(context, feature): """ stop all Patronis, remove their data directory and cleanup the keys in etcd """ context.pctl.stop_all() shutil.rmtree(os.path.join(context.pctl.patroni_path, 'data')) context.dcs_ctl.cleanup_service_tree() if feature.status == 'failed': shutil.copytree(context.pctl.output_dir, context.pctl.output_dir + '_failed') patroni-1.6.4/features/patroni_api.feature000066400000000000000000000140701361356115100207020ustar00rootroot00000000000000Feature: patroni api We should check that patroni correctly responds to valid and not-valid API requests. Scenario: check API requests on a stand-alone server Given I start postgres0 And postgres0 is a leader after 10 seconds When I issue a GET request to http://127.0.0.1:8008/ Then I receive a response code 200 And I receive a response state running And I receive a response role master When I issue a GET request to http://127.0.0.1:8008/health Then I receive a response code 200 When I issue a GET request to http://127.0.0.1:8008/replica Then I receive a response code 503 When I run patronictl.py reinit batman postgres0 --force Then I receive a response returncode 0 And I receive a response output "Failed: reinitialize for member postgres0, status code=503, (I am the leader, can not reinitialize)" When I run patronictl.py switchover batman --master postgres0 --force Then I receive a response returncode 1 And I receive a response output "Error: No candidates found to switchover to" When I issue a POST request to http://127.0.0.1:8008/switchover with {"leader": "postgres0"} Then I receive a response code 412 And I receive a response text switchover is not possible: cluster does not have members except leader When I issue an empty POST request to http://127.0.0.1:8008/failover Then I receive a response code 400 When I issue a POST request to http://127.0.0.1:8008/failover with {"foo": "bar"} Then I receive a response code 400 And I receive a response text "Failover could be performed only to a specific candidate" Scenario: check local configuration reload Given I add tag new_tag new_value to postgres0 config And I issue an empty POST request to http://127.0.0.1:8008/reload Then I receive a response code 202 Scenario: check dynamic configuration change via DCS Given I run patronictl.py edit-config -s 'ttl=10' -s 'loop_wait=2' -p 'max_connections=101' --force batman Then I receive a response returncode 0 And I receive a response output "+loop_wait: 2" And Response on GET http://127.0.0.1:8008/patroni contains pending_restart after 11 seconds When I issue a GET request to http://127.0.0.1:8008/config Then I receive a response code 200 And I receive a response loop_wait 2 When I issue a GET request to http://127.0.0.1:8008/patroni Then I receive a response code 200 And I receive a response tags {'new_tag': 'new_value'} And I sleep for 4 seconds Scenario: check the scheduled restart Given I issue a PATCH request to http://127.0.0.1:8008/config with {"postgresql": {"parameters": {"superuser_reserved_connections": "6"}}} Then I receive a 
response code 200 And Response on GET http://127.0.0.1:8008/patroni contains pending_restart after 5 seconds Given I issue a scheduled restart at http://127.0.0.1:8008 in 3 seconds with {"role": "replica"} Then I receive a response code 202 And I sleep for 4 seconds And Response on GET http://127.0.0.1:8008/patroni contains pending_restart after 10 seconds Given I issue a scheduled restart at http://127.0.0.1:8008 in 3 seconds with {"restart_pending": "True"} Then I receive a response code 202 And Response on GET http://127.0.0.1:8008/patroni does not contain pending_restart after 10 seconds And postgres0 role is the primary after 10 seconds Scenario: check API requests for the primary-replica pair in the pause mode Given I run patronictl.py pause batman Then I receive a response returncode 0 When I start postgres1 Then replication works from postgres0 to postgres1 after 20 seconds When I issue a GET request to http://127.0.0.1:8009/replica Then I receive a response code 200 And I receive a response state running And I receive a response role replica When I run patronictl.py reinit batman postgres1 --force Then I receive a response returncode 0 And I receive a response output "Success: reinitialize for member postgres1" When I run patronictl.py restart batman postgres0 --force Then I receive a response returncode 0 And I receive a response output "Success: restart on member postgres0" And postgres0 role is the primary after 5 seconds When I sleep for 10 seconds Then postgres1 role is the secondary after 15 seconds Scenario: check the switchover via the API in the pause mode Given I issue a POST request to http://127.0.0.1:8008/switchover with {"leader": "postgres0", "candidate": "postgres1"} Then I receive a response code 200 And postgres1 is a leader after 5 seconds And postgres1 role is the primary after 10 seconds And postgres0 role is the secondary after 10 seconds And replication works from postgres1 to postgres0 after 20 seconds And "members/postgres0" key in DCS has state=running after 10 seconds When I issue a GET request to http://127.0.0.1:8008/master Then I receive a response code 503 When I issue a GET request to http://127.0.0.1:8008/replica Then I receive a response code 200 When I issue a GET request to http://127.0.0.1:8009/master Then I receive a response code 200 When I issue a GET request to http://127.0.0.1:8009/replica Then I receive a response code 503 Scenario: check the scheduled switchover Given I issue a scheduled switchover from postgres1 to postgres0 in 3 seconds Then I receive a response returncode 1 And I receive a response output "Can't schedule switchover in the paused state" When I run patronictl.py resume batman Then I receive a response returncode 0 Given I issue a scheduled switchover from postgres1 to postgres0 in 3 seconds Then I receive a response returncode 0 And postgres0 is a leader after 20 seconds And postgres0 role is the primary after 10 seconds And postgres1 role is the secondary after 10 seconds And replication works from postgres0 to postgres1 after 25 seconds And "members/postgres1" key in DCS has state=running after 10 seconds When I issue a GET request to http://127.0.0.1:8008/master Then I receive a response code 200 When I issue a GET request to http://127.0.0.1:8008/replica Then I receive a response code 503 When I issue a GET request to http://127.0.0.1:8009/master Then I receive a response code 503 When I issue a GET request to http://127.0.0.1:8009/replica Then I receive a response code 200 
patroni-1.6.4/features/standby_cluster.feature000066400000000000000000000054021361356115100216010ustar00rootroot00000000000000Feature: standby cluster Scenario: check permanent logical slots are preserved on failover/switchover Given I start postgres1 Then postgres1 is a leader after 10 seconds And there is a non empty initialize key in DCS after 15 seconds When I issue a PATCH request to http://127.0.0.1:8009/config with {"loop_wait": 2, "slots": {"pm_1": {"type": "physical"}}, "postgresql": {"parameters": {"wal_level": "logical"}}} Then I receive a response code 200 And Response on GET http://127.0.0.1:8009/config contains slots after 10 seconds And I sleep for 3 seconds When I issue a PATCH request to http://127.0.0.1:8009/config with {"slots": {"test_logical": {"type": "logical", "database": "postgres", "plugin": "test_decoding"}}} Then I receive a response code 200 And I do a backup of postgres1 When I start postgres0 with callback configured Then "members/postgres0" key in DCS has state=running after 10 seconds And replication works from postgres1 to postgres0 after 15 seconds When I shut down postgres1 Then postgres0 is a leader after 10 seconds And "members/postgres0" key in DCS has role=master after 3 seconds When I issue a GET request to http://127.0.0.1:8008/ Then I receive a response code 200 And there is a label with "test_logical" in postgres0 data directory Scenario: check replication of a single table in a standby cluster Given I start postgres1 in a standby cluster batman1 as a clone of postgres0 Then postgres1 is a leader of batman1 after 10 seconds When I add the table foo to postgres0 Then table foo is present on postgres1 after 20 seconds When I issue a GET request to http://127.0.0.1:8009/master Then I receive a response code 200 When I issue a GET request to http://127.0.0.1:8009/standby_leader Then I receive a response code 200 And I receive a response role standby_leader And there is a postgres1_cb.log with "on_role_change standby_leader batman1" in postgres1 data directory When I start postgres2 in a cluster batman1 Then postgres2 role is the replica after 24 seconds And table foo is present on postgres2 after 20 seconds Scenario: check failover When I kill postgres1 And I kill postmaster on postgres1 Then postgres2 is replicating from postgres0 after 32 seconds When I issue a GET request to http://127.0.0.1:8010/master Then I receive a response code 200 When I issue a GET request to http://127.0.0.1:8010/standby_leader Then I receive a response code 200 And I receive a response role standby_leader And replication works from postgres0 to postgres2 after 15 seconds And there is a postgres2_cb.log with "on_start replica batman1\non_role_change standby_leader batman1" in postgres2 data directory patroni-1.6.4/features/steps/000077500000000000000000000000001361356115100161545ustar00rootroot00000000000000patroni-1.6.4/features/steps/basic_replication.py000066400000000000000000000044021361356115100222000ustar00rootroot00000000000000import psycopg2 as pg from behave import step, then from time import sleep, time @step('I start {name:w}') def start_patroni(context, name): return context.pctl.start(name) @step('I shut down {name:w}') def stop_patroni(context, name): return context.pctl.stop(name, timeout=60) @step('I kill {name:w}') def kill_patroni(context, name): return context.pctl.stop(name, kill=True) @step('I kill postmaster on {name:w}') def stop_postgres(context, name): return context.pctl.stop(name, postgres=True) @step('I add the table {table_name:w} to 
{pg_name:w}') def add_table(context, table_name, pg_name): # parse the configuration file and get the port try: context.pctl.query(pg_name, "CREATE TABLE {0}()".format(table_name)) except pg.Error as e: assert False, "Error creating table {0} on {1}: {2}".format(table_name, pg_name, e) @then('table {table_name:w} is present on {pg_name:w} after {max_replication_delay:d} seconds') def table_is_present_on(context, table_name, pg_name, max_replication_delay): max_replication_delay *= context.timeout_multiplier for _ in range(int(max_replication_delay)): if context.pctl.query(pg_name, "SELECT 1 FROM {0}".format(table_name), fail_ok=True) is not None: break sleep(1) else: assert False,\ "Table {0} is not present on {1} after {2} seconds".format(table_name, pg_name, max_replication_delay) @then('{pg_name:w} role is the {pg_role:w} after {max_promotion_timeout:d} seconds') def check_role(context, pg_name, pg_role, max_promotion_timeout): max_promotion_timeout *= context.timeout_multiplier assert context.pctl.check_role_has_changed_to(pg_name, pg_role, timeout=int(max_promotion_timeout)),\ "{0} role didn't change to {1} after {2} seconds".format(pg_name, pg_role, max_promotion_timeout) @step('replication works from {master:w} to {replica:w} after {time_limit:d} seconds') @then('replication works from {master:w} to {replica:w} after {time_limit:d} seconds') def replication_works(context, master, replica, time_limit): context.execute_steps(u""" When I add the table test_{0} to {1} Then table test_{0} is present on {2} after {3} seconds """.format(int(time()), master, replica, time_limit)) patroni-1.6.4/features/steps/cascading_replication.py000066400000000000000000000034071361356115100230370ustar00rootroot00000000000000import json import time from behave import step, then @step('I configure and start {name:w} with a tag {tag_name:w} {tag_value:w}') def start_patroni_with_a_name_value_tag(context, name, tag_name, tag_value): return context.pctl.start(name, custom_config={'tags': {tag_name: tag_value}}) @then('there is a {label} with "{content}" in {name:w} data directory') def check_label(context, label, content, name): label = context.pctl.read_label(name, label) label = label.replace('\n', '\\n') assert content in label, "{0} doesn't contain {1}".format(label, content) @step('I create label with "{content:w}" in {name:w} data directory') def write_label(context, content, name): context.pctl.write_label(name, content) @step('"{name}" key in DCS has {key:w}={value:w} after {time_limit:d} seconds') def check_member(context, name, key, value, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) while time.time() < max_time: try: response = json.loads(context.dcs_ctl.query(name)) if response.get(key) == value: return except Exception: pass time.sleep(1) assert False, "{0} does not have {1}={2} in dcs after {3} seconds".format(name, key, value, time_limit) @step('there is a non empty {key:w} key in DCS after {time_limit:d} seconds') def check_initialize(context, key, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) while time.time() < max_time: try: if context.dcs_ctl.query(key): return except Exception: pass time.sleep(1) assert False, "There is no {0} in dcs after {1} seconds".format(key, time_limit) patroni-1.6.4/features/steps/custom_bootstrap.py000066400000000000000000000016751361356115100221440ustar00rootroot00000000000000import time from behave import step, then @step('I start {name:w} in a cluster 
{cluster_name:w} as a clone of {name2:w}') def start_cluster_clone(context, name, cluster_name, name2): context.pctl.clone(name2, cluster_name, name) @step('I start {name:w} in a cluster {cluster_name:w} from backup') def start_cluster_from_backup(context, name, cluster_name): context.pctl.bootstrap_from_backup(name, cluster_name) @then('{name:w} is a leader of {cluster_name:w} after {time_limit:d} seconds') def is_a_leader(context, name, cluster_name, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) while (context.dcs_ctl.query("leader", scope=cluster_name) != name): time.sleep(1) assert time.time() < max_time, "{0} is not a leader in dcs after {1} seconds".format(name, time_limit) @step('I do a backup of {name:w}') def do_backup(context, name): context.pctl.backup(name) patroni-1.6.4/features/steps/patroni_api.py000066400000000000000000000133751361356115100210440ustar00rootroot00000000000000import json import os import parse import shlex import subprocess import sys import time import yaml from behave import register_type, step, then from dateutil import tz from datetime import datetime, timedelta from patroni.request import PatroniRequest tzutc = tz.tzutc() request_executor = PatroniRequest({'ctl': {'auth': 'username:password'}}) @parse.with_pattern(r'https?://(?:\w|\.|:|/)+') def parse_url(text): return text register_type(url=parse_url) # there is no way we can find out if the node has already # started as a leader without checking the DCS. We cannot # just rely on the database availability, since there is # a short gap between the time PostgreSQL becomes available # and Patroni assuming the leader role. @step('{name:w} is a leader after {time_limit:d} seconds') @then('{name:w} is a leader after {time_limit:d} seconds') def is_a_leader(context, name, time_limit): time_limit *= context.timeout_multiplier max_time = time.time() + int(time_limit) while (context.dcs_ctl.query("leader") != name): time.sleep(1) assert time.time() < max_time, "{0} is not a leader in dcs after {1} seconds".format(name, time_limit) @step('I sleep for {value:d} seconds') def sleep_for_n_seconds(context, value): time.sleep(int(value)) def _set_response(context, response): context.status_code = response.status data = response.data.decode('utf-8') ct = response.getheader('content-type', '') if ct.startswith('application/json') or\ ct.startswith('text/yaml') or\ ct.startswith('text/x-yaml') or\ ct.startswith('application/yaml') or\ ct.startswith('application/x-yaml'): try: context.response = yaml.safe_load(data) except ValueError: context.response = data else: context.response = data @step('I issue a GET request to {url:url}') def do_get(context, url): do_request(context, 'GET', url, None) @step('I issue an empty POST request to {url:url}') def do_post_empty(context, url): do_request(context, 'POST', url, None) @step('I issue a {request_method:w} request to {url:url} with {data}') def do_request(context, request_method, url, data): data = data and json.loads(data) try: r = request_executor.request(request_method, url, data) except Exception: context.status_code = context.response = None else: _set_response(context, r) @step('I run {cmd}') def do_run(context, cmd): cmd = [sys.executable, '-m', 'coverage', 'run', '--source=patroni', '-p'] + shlex.split(cmd) try: # XXX: Dirty hack! We need to take name/passwd from the config! 
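# A minimal sketch of what reading it from the config could look like, assuming the
# restapi.authentication layout Patroni uses elsewhere; config_path and the fallback
# values here are illustrative only, not part of this harness:
#   restapi_auth = yaml.safe_load(open(config_path)).get('restapi', {}).get('authentication', {})
#   env_auth = {'PATRONI_RESTAPI_USERNAME': restapi_auth.get('username', 'username'),
#               'PATRONI_RESTAPI_PASSWORD': restapi_auth.get('password', 'password')}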
env = os.environ.copy() env.update({'PATRONI_RESTAPI_USERNAME': 'username', 'PATRONI_RESTAPI_PASSWORD': 'password'}) response = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) context.status_code = 0 except subprocess.CalledProcessError as e: response = e.output context.status_code = e.returncode context.response = response.decode('utf-8').strip() @then('I receive a response {component:w} {data}') def check_response(context, component, data): if component == 'code': assert context.status_code == int(data),\ "status code {0} != {1}, response: {2}".format(context.status_code, data, context.response) elif component == 'returncode': assert context.status_code == int(data), "return code {0} != {1}, {2}".format(context.status_code, data, context.response) elif component == 'text': assert context.response == data.strip('"'), "response {0} does not contain {1}".format(context.response, data) elif component == 'output': assert data.strip('"') in context.response, "response {0} does not contain {1}".format(context.response, data) else: assert component in context.response, "{0} is not part of the response".format(component) assert str(context.response[component]) == str(data), "{0} does not contain {1}".format(component, data) @step('I issue a scheduled switchover from {from_host:w} to {to_host:w} in {in_seconds:d} seconds') def scheduled_switchover(context, from_host, to_host, in_seconds): context.execute_steps(u""" Given I run patronictl.py switchover batman --master {0} --candidate {1} --scheduled "{2}" --force """.format(from_host, to_host, datetime.now(tzutc) + timedelta(seconds=int(in_seconds)))) @step('I issue a scheduled restart at {url:url} in {in_seconds:d} seconds with {data}') def scheduled_restart(context, url, in_seconds, data): data = data and json.loads(data) or {} data.update(schedule='{0}'.format((datetime.now(tzutc) + timedelta(seconds=int(in_seconds))).isoformat())) context.execute_steps(u"""Given I issue a POST request to {0}/restart with {1}""".format(url, json.dumps(data))) @step('I add tag {tag:w} {value:w} to {pg_name:w} config') def add_tag_to_config(context, tag, value, pg_name): context.pctl.add_tag_to_config(pg_name, tag, value) @then('Response on GET {url} contains {value} after {timeout:d} seconds') def check_http_response(context, url, value, timeout, negate=False): timeout *= context.timeout_multiplier for _ in range(int(timeout)): r = request_executor.request('GET', url) if (value in r.data.decode('utf-8')) != negate: break time.sleep(1) else: assert False,\ "Value {0} is {1} present in response after {2} seconds".format(value, "not" if not negate else "", timeout) @then('Response on GET {url} does not contain {value} after {timeout:d} seconds') def check_not_in_http_response(context, url, value, timeout): check_http_response(context, url, value, timeout, negate=True) patroni-1.6.4/features/steps/standby_cluster.py000066400000000000000000000053461361356115100217430ustar00rootroot00000000000000import os import sys import time from behave import step select_replication_query = """ SELECT * FROM pg_catalog.pg_stat_replication WHERE application_name = '{0}' """ callback = sys.executable + " features/callback2.py " @step('I start {name:w} with callback configured') def start_patroni_with_callbacks(context, name): return context.pctl.start(name, custom_config={ "postgresql": { "callbacks": { "on_role_change": sys.executable + " features/callback.py" } } }) @step('I start {name:w} in a cluster {cluster_name:w}') def start_patroni(context, name, 
cluster_name): return context.pctl.start(name, custom_config={ "scope": cluster_name, "postgresql": { "callbacks": {c: callback + name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')}, "backup_restore": { "command": (sys.executable + " features/backup_restore.py --sourcedir=" + os.path.join(context.pctl.patroni_path, 'data', 'basebackup'))} } }) @step('I start {name:w} in a standby cluster {cluster_name:w} as a clone of {name2:w}') def start_patroni_standby_cluster(context, name, cluster_name, name2): # we need to remove patroni.dynamic.json in order to "bootstrap" standby cluster with existing PGDATA os.unlink(os.path.join(context.pctl._processes[name]._data_dir, 'patroni.dynamic.json')) port = context.pctl._processes[name2]._connkwargs.get('port') context.pctl._processes[name].update_config({ "scope": cluster_name, "bootstrap": { "dcs": { "ttl": 20, "loop_wait": 2, "retry_timeout": 5, "standby_cluster": { "host": "localhost", "port": port, "primary_slot_name": "pm_1", "create_replica_methods": ["backup_restore", "basebackup"] } } }, "postgresql": { "callbacks": {c: callback + name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')} } }) return context.pctl.start(name) @step('{pg_name1:w} is replicating from {pg_name2:w} after {timeout:d} seconds') def check_replication_status(context, pg_name1, pg_name2, timeout): bound_time = time.time() + timeout while time.time() < bound_time: cur = context.pctl.query( pg_name2, select_replication_query.format(pg_name1), fail_ok=True ) if cur and len(cur.fetchall()) != 0: break time.sleep(1) else: assert False, "{0} is not replicating from {1} after {2} seconds".format(pg_name1, pg_name2, timeout) patroni-1.6.4/features/steps/watchdog.py000066400000000000000000000027041361356115100203310ustar00rootroot00000000000000from behave import step, then import time def polling_loop(timeout, interval=1): """Returns an iterator that returns values until timeout has passed. Timeout is measured from start of iteration.""" start_time = time.time() iteration = 0 end_time = start_time + timeout while time.time() < end_time: yield iteration iteration += 1 time.sleep(interval) @step('I start {name:w} with watchdog') def start_patroni_with_watchdog(context, name): return context.pctl.start(name, custom_config={'watchdog': True}) @step('{name:w} watchdog has been pinged after {timeout:d} seconds') def watchdog_was_pinged(context, name, timeout): for _ in polling_loop(timeout): if context.pctl.get_watchdog(name).was_pinged: return True return False @then('{name:w} watchdog has been closed') def watchdog_was_closed(context, name): assert context.pctl.get_watchdog(name).was_closed @step('I reset {name:w} watchdog state') def watchdog_reset_pinged(context, name): context.pctl.get_watchdog(name).reset() @then('{name:w} watchdog is triggered after {timeout:d} seconds') def watchdog_was_triggered(context, name, timeout): for _ in polling_loop(timeout): if context.pctl.get_watchdog(name).was_triggered: return True assert False @step('{name:w} hangs for {timeout:d} seconds') def patroni_hang(context, name, timeout): return context.pctl.patroni_hang(name, timeout) patroni-1.6.4/features/watchdog.feature000066400000000000000000000023151361356115100201740ustar00rootroot00000000000000Feature: watchdog Verify that watchdog gets pinged and triggered under appropriate circumstances. 
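# Note: the "watchdog" exercised below is the named-pipe emulation implemented by
# WatchdogMonitor in features/environment.py (Patroni is pointed at it via the
# 'testing' driver), and "hangs for N seconds" suspends the Patroni process with
# SIGSTOP via ProcessHang; no real /dev/watchdog device is involved.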
Scenario: watchdog is opened and pinged Given I start postgres0 with watchdog Then postgres0 is a leader after 10 seconds And postgres0 role is the primary after 10 seconds And postgres0 watchdog has been pinged after 10 seconds Scenario: watchdog is disabled during pause Given I run patronictl.py pause batman Then I receive a response returncode 0 When I sleep for 2 seconds Then postgres0 watchdog has been closed Scenario: watchdog is opened and pinged after resume Given I reset postgres0 watchdog state And I run patronictl.py resume batman Then I receive a response returncode 0 And postgres0 watchdog has been pinged after 10 seconds Scenario: watchdog is disabled when shutting down Given I shut down postgres0 Then postgres0 watchdog has been closed Scenario: watchdog is triggered if patroni stops responding Given I reset postgres0 watchdog state And I start postgres0 with watchdog Then postgres0 role is the primary after 10 seconds When postgres0 hangs for 30 seconds Then postgres0 watchdog is triggered after 30 seconds patroni-1.6.4/haproxy.cfg000066400000000000000000000010601361356115100153500ustar00rootroot00000000000000global maxconn 100 defaults log global mode tcp retries 2 timeout client 30m timeout connect 4s timeout server 30m timeout check 5s listen stats mode http bind *:7000 stats enable stats uri / listen batman bind *:5000 option httpchk http-check expect status 200 default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions server postgresql_127.0.0.1_5432 127.0.0.1:5432 maxconn 100 check port 8008 server postgresql_127.0.0.1_5433 127.0.0.1:5433 maxconn 100 check port 8009 patroni-1.6.4/kubernetes/000077500000000000000000000000001361356115100153475ustar00rootroot00000000000000patroni-1.6.4/kubernetes/Dockerfile000066400000000000000000000026131361356115100173430ustar00rootroot00000000000000FROM postgres:11 MAINTAINER Alexander Kukushkin RUN export DEBIAN_FRONTEND=noninteractive \ && echo 'APT::Install-Recommends "0";\nAPT::Install-Suggests "0";' > /etc/apt/apt.conf.d/01norecommend \ && apt-get update -y \ && apt-get upgrade -y \ && apt-cache depends patroni | sed -n -e 's/.* Depends: \(python3-.\+\)$/\1/p' \ | grep -Ev '^python3-(sphinx|etcd|consul|kazoo|kubernetes)' \ | xargs apt-get install -y vim-tiny curl jq locales git python3-pip python3-wheel \ ## Make sure we have a en_US.UTF-8 locale available && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ && pip3 install setuptools \ && pip3 install 'git+https://github.com/zalando/patroni.git#egg=patroni[kubernetes]' \ && PGHOME=/home/postgres \ && mkdir -p $PGHOME \ && chown postgres $PGHOME \ && sed -i "s|/var/lib/postgresql.*|$PGHOME:/bin/bash|" /etc/passwd \ # Set permissions for OpenShift && chmod 775 $PGHOME \ && chmod 664 /etc/passwd \ # Clean up && apt-get remove -y git python3-pip python3-wheel \ && apt-get autoremove -y \ && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* /root/.cache ADD entrypoint.sh / EXPOSE 5432 8008 ENV LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 EDITOR=/usr/bin/editor USER postgres WORKDIR /home/postgres CMD ["/bin/bash", "/entrypoint.sh"] patroni-1.6.4/kubernetes/entrypoint.sh000077500000000000000000000020461361356115100201230ustar00rootroot00000000000000#!/bin/bash if [[ $UID -ge 10000 ]]; then GID=$(id -g) sed -e "s/^postgres:x:[^:]*:[^:]*:/postgres:x:$UID:$GID:/" /etc/passwd > /tmp/passwd cat /tmp/passwd > /etc/passwd rm /tmp/passwd fi cat > /home/postgres/patroni.yml <<__EOF__ bootstrap: dcs: postgresql: use_pg_rewind: true initdb: - auth-host: md5 - 
auth-local: trust - encoding: UTF8 - locale: en_US.UTF-8 - data-checksums pg_hba: - host all all 0.0.0.0/0 md5 - host replication ${PATRONI_REPLICATION_USERNAME} ${PATRONI_KUBERNETES_POD_IP}/16 md5 restapi: connect_address: '${PATRONI_KUBERNETES_POD_IP}:8008' postgresql: connect_address: '${PATRONI_KUBERNETES_POD_IP}:5432' authentication: superuser: password: '${PATRONI_SUPERUSER_PASSWORD}' replication: password: '${PATRONI_REPLICATION_PASSWORD}' __EOF__ unset PATRONI_SUPERUSER_PASSWORD PATRONI_REPLICATION_PASSWORD export KUBERNETES_NAMESPACE=$PATRONI_KUBERNETES_NAMESPACE export POD_NAME=$PATRONI_NAME exec /usr/bin/python3 /usr/local/bin/patroni /home/postgres/patroni.ymlpatroni-1.6.4/kubernetes/openshift-example/000077500000000000000000000000001361356115100207775ustar00rootroot00000000000000patroni-1.6.4/kubernetes/openshift-example/README.md000066400000000000000000000025611361356115100222620ustar00rootroot00000000000000# Patroni OpenShift Configuration Patroni can be run in OpenShift. Based on the kubernetes configuration, the Dockerfile and Entrypoint have been modified to support the dynamic UID/GID configuration that is applied in OpenShift. This can be run under the standard `restricted` SCC. # Examples ## Create test project ``` oc new-project patroni-test ``` ## Build the image Note: Update the references when merged upstream. Note: If deploying as a template for multiple users, the following commands should be performed in a shared namespace like `openshift`. ``` oc import-image postgres:10 --confirm -n openshift oc new-build https://github.com/zalando/patroni --context-dir=kubernetes -n openshift ``` ## Deploy the Image Two configuration templates exist in the [templates](templates) directory: - Patroni Ephemeral - Patroni Persistent The only difference is whether or not the statefulset requests persistent storage. ## Create the Template Install the template into the `openshift` namespace if this should be shared across projects: ``` oc create -f templates/template_patroni_ephemeral.yml -n openshift ``` Then, from your own project: ``` oc new-app patroni-pgsql-ephemeral ``` Once the pods are running, two configmaps should be available: ``` $ oc get configmap NAME DATA AGE patroniocp-config 0 1m patroniocp-leader 0 1m ```patroni-1.6.4/kubernetes/openshift-example/templates/000077500000000000000000000000001361356115100227755ustar00rootroot00000000000000patroni-1.6.4/kubernetes/openshift-example/templates/template_patroni_ephemeral.yml000066400000000000000000000202531361356115100311130ustar00rootroot00000000000000apiVersion: v1 kind: Template metadata: name: patroni-pgsql-ephemeral annotations: description: |- Patroni Postgresql database cluster, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing. iconClass: icon-postgresql openshift.io/display-name: Patroni Postgresql (Ephemeral) openshift.io/long-description: This template deploys a Patroni PostgreSQL HA cluster without persistent storage. 
tags: postgresql objects: - apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${PATRONI_CLUSTER_NAME} spec: ports: - port: 5432 protocol: TCP targetPort: 5432 sessionAffinity: None type: ClusterIP status: loadBalancer: {} - apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${PATRONI_MASTER_SERVICE_NAME} spec: ports: - port: 5432 protocol: TCP targetPort: 5432 selector: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} role: master sessionAffinity: None type: ClusterIP status: loadBalancer: {} - apiVersion: v1 kind: Secret metadata: name: ${PATRONI_CLUSTER_NAME} labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} stringData: superuser-password: ${PATRONI_SUPERUSER_PASSWORD} replication-password: ${PATRONI_REPLICATION_PASSWORD} - apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${PATRONI_REPLICA_SERVICE_NAME} spec: ports: - port: 5432 protocol: TCP targetPort: 5432 selector: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} role: replica sessionAffinity: None type: ClusterIP status: loadBalancer: {} - apiVersion: apps/v1 kind: StatefulSet metadata: creationTimestamp: null generation: 3 labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${APPLICATION_NAME} spec: podManagementPolicy: OrderedReady replicas: 3 revisionHistoryLimit: 10 selector: matchLabels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} serviceName: ${APPLICATION_NAME} template: metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} spec: containers: - env: - name: PATRONI_KUBERNETES_POD_IP valueFrom: fieldRef: apiVersion: v1 fieldPath: status.podIP - name: PATRONI_KUBERNETES_NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.namespace - name: PATRONI_KUBERNETES_LABELS value: '{application: ${APPLICATION_NAME}, cluster-name: ${PATRONI_CLUSTER_NAME}}' - name: PATRONI_SUPERUSER_USERNAME value: ${PATRONI_SUPERUSER_USERNAME} - name: PATRONI_SUPERUSER_PASSWORD valueFrom: secretKeyRef: key: superuser-password name: ${PATRONI_CLUSTER_NAME} - name: PATRONI_REPLICATION_USERNAME value: ${PATRONI_REPLICATION_USERNAME} - name: PATRONI_REPLICATION_PASSWORD valueFrom: secretKeyRef: key: replication-password name: ${PATRONI_CLUSTER_NAME} - name: PATRONI_SCOPE value: ${PATRONI_CLUSTER_NAME} - name: PATRONI_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: PATRONI_POSTGRESQL_DATA_DIR value: /home/postgres/pgdata/pgroot/data - name: PATRONI_POSTGRESQL_PGPASS value: /tmp/pgpass - name: PATRONI_POSTGRESQL_LISTEN value: 0.0.0.0:5432 - name: PATRONI_RESTAPI_LISTEN value: 0.0.0.0:8008 image: docker-registry.default.svc:5000/${NAMESPACE}/patroni:latest imagePullPolicy: IfNotPresent name: ${APPLICATION_NAME} ports: - containerPort: 8008 protocol: TCP - containerPort: 5432 protocol: TCP resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /home/postgres/pgdata name: pgdata dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} serviceAccount: ${SERVICE_ACCOUNT} serviceAccountName: ${SERVICE_ACCOUNT} terminationGracePeriodSeconds: 0 
volumes: - name: pgdata emptyDir: {} updateStrategy: type: OnDelete - apiVersion: v1 kind: Endpoints metadata: name: ${APPLICATION_NAME} labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} subsets: [] - apiVersion: v1 kind: ServiceAccount metadata: name: ${SERVICE_ACCOUNT} - apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: ${SERVICE_ACCOUNT} rules: - apiGroups: - "" resources: - configmaps verbs: - create - get - list - patch - update - watch # delete is required only for 'patronictl remove' - delete - apiGroups: - "" resources: - endpoints verbs: - get - patch - update # the following three privileges are necessary only when using endpoints - create - list - watch # delete is required only for 'patronictl remove' - delete - apiGroups: - "" resources: - pods verbs: - get - list - patch - update - watch - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: ${SERVICE_ACCOUNT} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: ${SERVICE_ACCOUNT} subjects: - kind: ServiceAccount name: ${SERVICE_ACCOUNT} parameters: - description: The name of the application for labelling all artifacts. displayName: Application Name name: APPLICATION_NAME value: patroni-ephemeral - description: The name of the patroni-pgsql cluster. displayName: Cluster Name name: PATRONI_CLUSTER_NAME value: patroni-ephemeral - description: The name of the OpenShift Service exposed for the patroni-ephemeral-master container. displayName: Master service name. name: PATRONI_MASTER_SERVICE_NAME value: patroni-ephemeral-master - description: The name of the OpenShift Service exposed for the patroni-ephemeral-replica containers. displayName: Replica service name. name: PATRONI_REPLICA_SERVICE_NAME value: patroni-ephemeral-replica - description: Maximum amount of memory the container can use. displayName: Memory Limit name: MEMORY_LIMIT value: 512Mi - description: The OpenShift Namespace where the patroni and postgresql ImageStream resides. displayName: ImageStream Namespace name: NAMESPACE value: openshift - description: Username of the superuser account for initialization. displayName: Superuser Username name: PATRONI_SUPERUSER_USERNAME value: postgres - description: Password of the superuser account for initialization. displayName: Superuser Password name: PATRONI_SUPERUSER_PASSWORD value: postgres - description: Username of the replication account for initialization. displayName: Replication Username name: PATRONI_REPLICATION_USERNAME value: postgres - description: Password of the replication account for initialization. displayName: Replication Password name: PATRONI_REPLICATION_PASSWORD value: postgres - description: Service account name used for pods and rolebindings to form a cluster in the project. displayName: Service Account name: SERVICE_ACCOUNT value: patroniocp patroni-1.6.4/kubernetes/openshift-example/templates/template_patroni_persistent.yaml000066400000000000000000000212201361356115100315060ustar00rootroot00000000000000apiVersion: v1 kind: Template metadata: name: patroni-pgsql-persistent annotations: description: |- Patroni Postgresql database cluster, with persistent storage provided by a PersistentVolumeClaim per pod, so data survives pod destruction. iconClass: icon-postgresql openshift.io/display-name: Patroni Postgresql (Persistent) openshift.io/long-description: This template deploys a Patroni PostgreSQL HA cluster with persistent storage. 
tags: postgresql objects: - apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${PATRONI_CLUSTER_NAME} spec: ports: - port: 5432 protocol: TCP targetPort: 5432 sessionAffinity: None type: ClusterIP status: loadBalancer: {} - apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${PATRONI_MASTER_SERVICE_NAME} spec: ports: - port: 5432 protocol: TCP targetPort: 5432 selector: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} role: master sessionAffinity: None type: ClusterIP status: loadBalancer: {} - apiVersion: v1 kind: Secret metadata: name: ${PATRONI_CLUSTER_NAME} labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} stringData: superuser-password: ${PATRONI_SUPERUSER_PASSWORD} replication-password: ${PATRONI_REPLICATION_PASSWORD} - apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${PATRONI_REPLICA_SERVICE_NAME} spec: ports: - port: 5432 protocol: TCP targetPort: 5432 selector: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} role: replica sessionAffinity: None type: ClusterIP status: loadBalancer: {} - apiVersion: apps/v1 kind: StatefulSet metadata: creationTimestamp: null generation: 3 labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} name: ${APPLICATION_NAME} spec: podManagementPolicy: OrderedReady replicas: 3 revisionHistoryLimit: 10 selector: matchLabels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} serviceName: ${APPLICATION_NAME} template: metadata: creationTimestamp: null labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} spec: containers: - env: - name: PATRONI_KUBERNETES_POD_IP valueFrom: fieldRef: apiVersion: v1 fieldPath: status.podIP - name: PATRONI_KUBERNETES_NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.namespace - name: PATRONI_KUBERNETES_LABELS value: '{application: ${APPLICATION_NAME}, cluster-name: ${PATRONI_CLUSTER_NAME}}' - name: PATRONI_SUPERUSER_USERNAME value: ${PATRONI_SUPERUSER_USERNAME} - name: PATRONI_SUPERUSER_PASSWORD valueFrom: secretKeyRef: key: superuser-password name: ${PATRONI_CLUSTER_NAME} - name: PATRONI_REPLICATION_USERNAME value: ${PATRONI_REPLICATION_USERNAME} - name: PATRONI_REPLICATION_PASSWORD valueFrom: secretKeyRef: key: replication-password name: ${PATRONI_CLUSTER_NAME} - name: PATRONI_SCOPE value: ${PATRONI_CLUSTER_NAME} - name: PATRONI_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: PATRONI_POSTGRESQL_DATA_DIR value: /home/postgres/pgdata/pgroot/data - name: PATRONI_POSTGRESQL_PGPASS value: /tmp/pgpass - name: PATRONI_POSTGRESQL_LISTEN value: 0.0.0.0:5432 - name: PATRONI_RESTAPI_LISTEN value: 0.0.0.0:8008 image: docker-registry.default.svc:5000/${NAMESPACE}/patroni:latest imagePullPolicy: IfNotPresent name: ${APPLICATION_NAME} ports: - containerPort: 8008 protocol: TCP - containerPort: 5432 protocol: TCP resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /home/postgres/pgdata name: ${APPLICATION_NAME} dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} serviceAccount: ${SERVICE_ACCOUNT} serviceAccountName: ${SERVICE_ACCOUNT} 
terminationGracePeriodSeconds: 0 volumes: - name: ${APPLICATION_NAME} persistentVolumeClaim: claimName: ${APPLICATION_NAME} volumeClaimTemplates: - metadata: labels: application: ${APPLICATION_NAME} name: ${APPLICATION_NAME} spec: accessModes: - ReadWriteOnce resources: requests: storage: ${PVC_SIZE} updateStrategy: type: OnDelete - apiVersion: v1 kind: Endpoints metadata: name: ${APPLICATION_NAME} labels: application: ${APPLICATION_NAME} cluster-name: ${PATRONI_CLUSTER_NAME} subsets: [] - apiVersion: v1 kind: ServiceAccount metadata: name: ${SERVICE_ACCOUNT} - apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: ${SERVICE_ACCOUNT} rules: - apiGroups: - "" resources: - configmaps verbs: - create - get - list - patch - update - watch # delete is required only for 'patronictl remove' - delete - apiGroups: - "" resources: - endpoints verbs: - get - patch - update # the following three privileges are necessary only when using endpoints - create - list - watch # delete is required only for 'patronictl remove' - delete - apiGroups: - "" resources: - pods verbs: - get - list - patch - update - watch - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: ${SERVICE_ACCOUNT} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: ${SERVICE_ACCOUNT} subjects: - kind: ServiceAccount name: ${SERVICE_ACCOUNT} parameters: - description: The name of the application for labelling all artifacts. displayName: Application Name name: APPLICATION_NAME value: patroni-persistent - description: The name of the patroni-pgsql cluster. displayName: Cluster Name name: PATRONI_CLUSTER_NAME value: patroni-persistent - description: The name of the OpenShift Service exposed for the patroni-persistent-master container. displayName: Master service name. name: PATRONI_MASTER_SERVICE_NAME value: patroni-persistent-master - description: The name of the OpenShift Service exposed for the patroni-persistent-replica containers. displayName: Replica service name. name: PATRONI_REPLICA_SERVICE_NAME value: patroni-persistent-replica - description: Maximum amount of memory the container can use. displayName: Memory Limit name: MEMORY_LIMIT value: 512Mi - description: The OpenShift Namespace where the patroni and postgresql ImageStream resides. displayName: ImageStream Namespace name: NAMESPACE value: openshift - description: Username of the superuser account for initialization. displayName: Superuser Username name: PATRONI_SUPERUSER_USERNAME value: postgres - description: Password of the superuser account for initialization. displayName: Superuser Password name: PATRONI_SUPERUSER_PASSWORD value: postgres - description: Username of the replication account for initialization. displayName: Replication Username name: PATRONI_REPLICATION_USERNAME value: postgres - description: Password of the replication account for initialization. displayName: Replication Password name: PATRONI_REPLICATION_PASSWORD value: postgres - description: Service account name used for pods and rolebindings to form a cluster in the project. displayName: Service Account name: SERVICE_ACCOUNT value: patroni-persistent - description: The size of the persistent volume to create. 
displayName: Persistent Volume Size name: PVC_SIZE value: 5Gipatroni-1.6.4/kubernetes/openshift-example/test/000077500000000000000000000000001361356115100217565ustar00rootroot00000000000000patroni-1.6.4/kubernetes/openshift-example/test/Jenkinsfile000066400000000000000000000023601361356115100241430ustar00rootroot00000000000000pipeline { agent any stages { stage ('Deploy test pod'){ when { expression { openshift.withCluster() { openshift.withProject() { return !openshift.selector( "dc", "pgbench" ).exists() } } } } steps { script { openshift.withCluster() { openshift.withProject() { def pgbench = openshift.newApp( "https://github.com/stewartshea/docker-pgbench/", "--name=pgbench", "-e PGPASSWORD=postgres", "-e PGUSER=postgres", "-e PGHOST=patroni-persistent-master", "-e PGDATABASE=postgres", "-e TEST_CLIENT_COUNT=20", "-e TEST_DURATION=120" ) def pgbenchdc = openshift.selector( "dc", "pgbench" ) timeout(5) { pgbenchdc.rollout().status() } } } } } } stage ('Run benchmark Test'){ steps { sh ''' oc exec $(oc get pods -l app=pgbench | grep Running | awk '{print $1}') ./test.sh ''' } } stage ('Clean up pgtest pod'){ steps { sh ''' oc delete all -l app=pgbench ''' } } } } patroni-1.6.4/kubernetes/openshift-example/test/README.md000066400000000000000000000002701361356115100232340ustar00rootroot00000000000000# Jenkins Test This pipeline test will create a separate deployment config for a pgbench pod and execute a test against the patroni cluster. This is a sample and should be customized. patroni-1.6.4/kubernetes/patroni_k8s.yaml000066400000000000000000000112041361356115100204720ustar00rootroot00000000000000# headless service to avoid deletion of patronidemo-config endpoint apiVersion: v1 kind: Service metadata: name: patronidemo-config labels: application: patroni cluster-name: patronidemo spec: clusterIP: None --- apiVersion: apps/v1beta1 kind: StatefulSet metadata: name: &cluster_name patronidemo labels: application: patroni cluster-name: *cluster_name spec: replicas: 3 serviceName: *cluster_name template: metadata: labels: application: patroni cluster-name: *cluster_name spec: serviceAccountName: patronidemo containers: - name: *cluster_name image: patroni # docker build -t patroni . 
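(Aside: once the manifest below is deployed, the current leader can be located through the REST API this container exposes on port 8008; GET /master answers 200 only on the leader and 503 elsewhere, as implemented in api.py later in this tree. A sketch, assuming a reachable address; the IP below is a placeholder.)

import urllib.error
import urllib.request

# Hypothetical pod address; in-cluster you would use the pod IP or a Service.
try:
    code = urllib.request.urlopen('http://10.1.2.3:8008/master', timeout=5).getcode()
except urllib.error.HTTPError as e:
    code = e.code  # 503 on a replica
print('leader' if code == 200 else 'not the leader')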
imagePullPolicy: IfNotPresent ports: - containerPort: 8008 protocol: TCP - containerPort: 5432 protocol: TCP volumeMounts: - mountPath: /home/postgres/pgdata name: pgdata env: - name: PATRONI_KUBERNETES_POD_IP valueFrom: fieldRef: fieldPath: status.podIP - name: PATRONI_KUBERNETES_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: PATRONI_KUBERNETES_USE_ENDPOINTS value: 'true' - name: PATRONI_KUBERNETES_LABELS value: '{application: patroni, cluster-name: patronidemo}' - name: PATRONI_SUPERUSER_USERNAME value: postgres - name: PATRONI_SUPERUSER_PASSWORD valueFrom: secretKeyRef: name: *cluster_name key: superuser-password - name: PATRONI_REPLICATION_USERNAME value: standby - name: PATRONI_REPLICATION_PASSWORD valueFrom: secretKeyRef: name: *cluster_name key: replication-password - name: PATRONI_SCOPE value: *cluster_name - name: PATRONI_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: PATRONI_POSTGRESQL_DATA_DIR value: /home/postgres/pgdata/pgroot/data - name: PATRONI_POSTGRESQL_PGPASS value: /tmp/pgpass - name: PATRONI_POSTGRESQL_LISTEN value: '0.0.0.0:5432' - name: PATRONI_RESTAPI_LISTEN value: '0.0.0.0:8008' terminationGracePeriodSeconds: 0 volumes: - name: pgdata emptyDir: {} # volumeClaimTemplates: # - metadata: # labels: # application: spilo # spilo-cluster: *cluster_name # annotations: # volume.alpha.kubernetes.io/storage-class: anything # name: pgdata # spec: # accessModes: # - ReadWriteOnce # resources: # requests: # storage: 5Gi --- apiVersion: v1 kind: Endpoints metadata: name: &cluster_name patronidemo labels: application: patroni cluster-name: *cluster_name subsets: [] --- apiVersion: v1 kind: Service metadata: name: &cluster_name patronidemo labels: application: patroni cluster-name: *cluster_name spec: type: ClusterIP ports: - port: 5432 targetPort: 5432 --- apiVersion: v1 kind: Secret metadata: name: &cluster_name patronidemo labels: application: patroni cluster-name: *cluster_name type: Opaque data: superuser-password: emFsYW5kbw== replication-password: cmVwLXBhc3M= --- apiVersion: v1 kind: ServiceAccount metadata: name: patronidemo --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: patronidemo rules: - apiGroups: - "" resources: - configmaps verbs: - create - get - list - patch - update - watch # delete and deletecollection are required only for 'patronictl remove' - delete - deletecollection - apiGroups: - "" resources: - endpoints verbs: - get - patch - update # the following three privileges are necessary only when using endpoints - create - list - watch # delete and deletecollection are required only for 'patronictl remove' - delete - deletecollection - apiGroups: - "" resources: - pods verbs: - get - list - patch - update - watch # The following privilege is only necessary for creation of headless service # for patronidemo-config endpoint, in order to prevent cleaning it up by the # k8s master.
You can avoid giving this privilege by explicitly creating the # service like it is done in this manifest (lines 2..10) - apiGroups: - "" resources: - services verbs: - create --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: patronidemo roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: patronidemo subjects: - kind: ServiceAccount name: patronidemo patroni-1.6.4/mkbinary.sh000077500000000000000000000001711361356115100153520ustar00rootroot00000000000000#!/bin/sh set -e pip install --ignore-installed setuptools==19.2 pyinstaller pyinstaller --clean --onefile patroni.spec patroni-1.6.4/patroni.py000077500000000000000000000001271361356115100152310ustar00rootroot00000000000000#!/usr/bin/env python from patroni import main if __name__ == '__main__': main() patroni-1.6.4/patroni.spec000066400000000000000000000014471361356115100155360ustar00rootroot00000000000000# -*- mode: python -*- block_cipher = None def hiddenimports(): import sys sys.path.insert(0, '.') try: import patroni.dcs return patroni.dcs.dcs_modules() finally: sys.path.pop(0) a = Analysis(['patroni/__main__.py'], pathex=[], binaries=None, datas=None, hiddenimports=hiddenimports(), hookspath=[], runtime_hooks=[], excludes=[], win_no_prefer_redirects=False, win_private_assemblies=False, cipher=block_cipher) pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) exe = EXE(pyz, a.scripts, a.binaries, a.zipfiles, a.datas, name='patroni', debug=False, strip=False, upx=True, console=True) patroni-1.6.4/patroni/000077500000000000000000000000001361356115100146545ustar00rootroot00000000000000patroni-1.6.4/patroni/__init__.py000066400000000000000000000206041361356115100167670ustar00rootroot00000000000000import logging import os import signal import sys import time from patroni.version import __version__ logger = logging.getLogger(__name__) PATRONI_ENV_PREFIX = 'PATRONI_' class Patroni(object): def __init__(self, conf): from patroni.api import RestApiServer from patroni.dcs import get_dcs from patroni.ha import Ha from patroni.log import PatroniLogger from patroni.postgresql import Postgresql from patroni.request import PatroniRequest from patroni.watchdog import Watchdog self.setup_signal_handlers() self.version = __version__ self.logger = PatroniLogger() self.config = conf self.logger.reload_config(self.config.get('log', {})) self.dcs = get_dcs(self.config) self.watchdog = Watchdog(self.config) self.load_dynamic_configuration() self.postgresql = Postgresql(self.config['postgresql']) self.api = RestApiServer(self, self.config['restapi']) self.request = PatroniRequest(self.config, True) self.ha = Ha(self) self.tags = self.get_tags() self.next_run = time.time() self.scheduled_restart = {} def load_dynamic_configuration(self): from patroni.exceptions import DCSError while True: try: cluster = self.dcs.get_cluster() if cluster and cluster.config and cluster.config.data: if self.config.set_dynamic_configuration(cluster.config): self.dcs.reload_config(self.config) self.watchdog.reload_config(self.config) elif not self.config.dynamic_configuration and 'bootstrap' in self.config: if self.config.set_dynamic_configuration(self.config['bootstrap']['dcs']): self.dcs.reload_config(self.config) break except DCSError: logger.warning('Can not get cluster from dcs') time.sleep(5) def get_tags(self): return {tag: value for tag, value in self.config.get('tags', {}).items() if tag not in ('clonefrom', 'nofailover', 'noloadbalance', 'nosync') or value} @property def nofailover(self): return bool(self.tags.get('nofailover', 
False)) @property def nosync(self): return bool(self.tags.get('nosync', False)) def reload_config(self, sighup=False): try: self.tags = self.get_tags() self.logger.reload_config(self.config.get('log', {})) self.watchdog.reload_config(self.config) if sighup: self.request.reload_config(self.config) self.api.reload_config(self.config['restapi']) self.postgresql.reload_config(self.config['postgresql'], sighup) self.dcs.reload_config(self.config) except Exception: logger.exception('Failed to reload config_file=%s', self.config.config_file) @property def replicatefrom(self): return self.tags.get('replicatefrom') def sighup_handler(self, *args): self._received_sighup = True def sigterm_handler(self, *args): with self._sigterm_lock: if not self._received_sigterm: self._received_sigterm = True sys.exit() @property def noloadbalance(self): return bool(self.tags.get('noloadbalance', False)) def schedule_next_run(self): self.next_run += self.dcs.loop_wait current_time = time.time() nap_time = self.next_run - current_time if nap_time <= 0: self.next_run = current_time # Release the GIL so we don't starve anyone waiting on async_executor lock time.sleep(0.001) # Warn user that Patroni is not keeping up logger.warning("Loop time exceeded, rescheduling immediately.") elif self.ha.watch(nap_time): self.next_run = time.time() @property def received_sigterm(self): with self._sigterm_lock: return self._received_sigterm def run(self): self.api.start() self.logger.start() self.next_run = time.time() while not self.received_sigterm: if self._received_sighup: self._received_sighup = False if self.config.reload_local_configuration(): self.reload_config(True) else: self.postgresql.config.reload_config(self.config['postgresql'], True) logger.info(self.ha.run_cycle()) if self.dcs.cluster and self.dcs.cluster.config and self.dcs.cluster.config.data \ and self.config.set_dynamic_configuration(self.dcs.cluster.config): self.reload_config() if self.postgresql.role != 'uninitialized': self.config.save_cache() self.schedule_next_run() def setup_signal_handlers(self): from threading import Lock self._received_sighup = False self._sigterm_lock = Lock() self._received_sigterm = False if os.name != 'nt': signal.signal(signal.SIGHUP, self.sighup_handler) signal.signal(signal.SIGTERM, self.sigterm_handler) def shutdown(self): with self._sigterm_lock: self._received_sigterm = True try: self.api.shutdown() except Exception: logger.exception('Exception during RestApi.shutdown') try: self.ha.shutdown() except Exception: logger.exception('Exception during Ha.shutdown') self.logger.shutdown() def patroni_main(): import argparse from patroni.config import Config, ConfigParseError parser = argparse.ArgumentParser() parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(__version__)) parser.add_argument('configfile', nargs='?', default='', help='Patroni may also read the configuration from the {0} environment variable' .format(Config.PATRONI_CONFIG_VARIABLE)) args = parser.parse_args() try: conf = Config(args.configfile) except ConfigParseError as e: if e.value: print(e.value) parser.print_help() sys.exit(1) patroni = Patroni(conf) try: patroni.run() except KeyboardInterrupt: pass finally: patroni.shutdown() def fatal(string, *args): sys.stderr.write('FATAL: ' + string.format(*args) + '\n') sys.exit(1) def check_psycopg2(): min_psycopg2 = (2, 5, 4) min_psycopg2_str = '.'.join(map(str, min_psycopg2)) def parse_version(version): for e in version.split('.'): try: yield int(e) except ValueError: break try: import 
psycopg2 version_str = psycopg2.__version__.split(' ')[0] version = tuple(parse_version(version_str)) if version < min_psycopg2: fatal('Patroni requires psycopg2>={0}, but only {1} is available', min_psycopg2_str, version_str) except ImportError: fatal('Patroni requires psycopg2>={0} or psycopg2-binary', min_psycopg2_str) def main(): if os.getpid() != 1: check_psycopg2() return patroni_main() # Patroni started with PID=1, it looks like we are in the container pid = 0 # Looks like we are in a docker, so we will act like init def sigchld_handler(signo, stack_frame): try: while True: ret = os.waitpid(-1, os.WNOHANG) if ret == (0, 0): break elif ret[0] != pid: logger.info('Reaped pid=%s, exit status=%s', *ret) except OSError: pass def passtochild(signo, stack_frame): if pid: os.kill(pid, signo) if os.name != 'nt': signal.signal(signal.SIGCHLD, sigchld_handler) signal.signal(signal.SIGHUP, passtochild) signal.signal(signal.SIGQUIT, passtochild) signal.signal(signal.SIGUSR1, passtochild) signal.signal(signal.SIGUSR2, passtochild) signal.signal(signal.SIGINT, passtochild) signal.signal(signal.SIGABRT, passtochild) signal.signal(signal.SIGTERM, passtochild) import multiprocessing patroni = multiprocessing.Process(target=patroni_main) patroni.start() pid = patroni.pid patroni.join() patroni-1.6.4/patroni/__main__.py000066400000000000000000000001011361356115100167360ustar00rootroot00000000000000from patroni import main if __name__ == '__main__': main() patroni-1.6.4/patroni/api.py000066400000000000000000000664711361356115100160150ustar00rootroot00000000000000import base64 import json import logging import psycopg2 import time import traceback import dateutil.parser import datetime import os import six import socket from patroni.exceptions import PostgresConnectionException, PostgresException from patroni.postgresql.misc import postgres_version_to_int from patroni.utils import deep_compare, parse_bool, patch_config, Retry, \ RetryFailedError, parse_int, split_host_port, tzutc, uri, cluster_as_json from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer from six.moves.socketserver import ThreadingMixIn from threading import Thread logger = logging.getLogger(__name__) class RestApiHandler(BaseHTTPRequestHandler): def _write_response(self, status_code, body, content_type='text/html', headers=None): self.send_response(status_code) headers = headers or {} if content_type: headers['Content-Type'] = content_type for name, value in headers.items(): self.send_header(name, value) self.end_headers() self.wfile.write(body.encode('utf-8')) def _write_json_response(self, status_code, response): self._write_response(status_code, json.dumps(response), content_type='application/json') def check_auth(func): """Decorator function to check authorization header or client certificates Usage example: @check_auth def do_PUT_foo(): pass """ def wrapper(self, *args, **kwargs): if self.server.check_auth(self): return func(self, *args, **kwargs) return wrapper def _write_status_response(self, status_code, response): patroni = self.server.patroni tags = patroni.ha.get_effective_tags() if tags: response['tags'] = tags if patroni.postgresql.sysid: response['database_system_identifier'] = patroni.postgresql.sysid if patroni.postgresql.pending_restart: response['pending_restart'] = True response['patroni'] = {'version': patroni.version, 'scope': patroni.postgresql.scope} if patroni.scheduled_restart and isinstance(patroni.scheduled_restart, dict): response['scheduled_restart'] = patroni.scheduled_restart.copy() del 
response['scheduled_restart']['postmaster_start_time'] response['scheduled_restart']['schedule'] = (response['scheduled_restart']['schedule']).isoformat() if not patroni.ha.watchdog.is_healthy: response['watchdog_failed'] = True if patroni.ha.is_paused(): response['pause'] = True qsize = patroni.logger.queue_size if qsize > patroni.logger.NORMAL_LOG_QUEUE_SIZE: response['logger_queue_size'] = qsize lost = patroni.logger.records_lost if lost: response['logger_records_lost'] = lost self._write_json_response(status_code, response) def do_GET(self, write_status_code_only=False): """Default method for processing all GET requests which can not be routed to other methods""" time_start = time.time() request_type = 'OPTIONS' if write_status_code_only else 'GET' path = '/master' if self.path == '/' else self.path response = self.get_postgresql_status() patroni = self.server.patroni cluster = patroni.dcs.cluster if not cluster and patroni.ha.is_paused(): primary_status_code = 200 if response['role'] == 'master' else 503 else: primary_status_code = 200 if patroni.ha.is_leader() else 503 replica_status_code = 200 if not patroni.noloadbalance and \ response.get('role') == 'replica' and response.get('state') == 'running' else 503 status_code = 503 if patroni.ha.is_standby_cluster() and ('standby_leader' in path or 'standby-leader' in path): status_code = 200 if patroni.ha.is_leader() else 503 elif 'master' in path or 'leader' in path or 'primary' in path or 'read-write' in path: status_code = primary_status_code elif 'replica' in path: status_code = replica_status_code elif 'read-only' in path: status_code = 200 if primary_status_code == 200 else replica_status_code elif 'health' in path: status_code = 200 if response.get('state') == 'running' else 503 elif cluster: # dcs is available is_synchronous = cluster.is_synchronous_mode() and cluster.sync \ and cluster.sync.sync_standby == patroni.postgresql.name if path in ('/sync', '/synchronous') and is_synchronous: status_code = replica_status_code elif path in ('/async', '/asynchronous') and not is_synchronous: status_code = replica_status_code if write_status_code_only: # when haproxy sends OPTIONS request it reads only status code and nothing more message = self.responses[status_code][0] self.wfile.write('{0} {1} {2}\r\n'.format(self.protocol_version, status_code, message).encode('utf-8')) else: self._write_status_response(status_code, response) time_end = time.time() self.log_message('%s %s %s latency: %s ms', request_type, path, status_code, (time_end - time_start) * 1000) def do_OPTIONS(self): self.do_GET(write_status_code_only=True) def do_GET_patroni(self): response = self.get_postgresql_status(True) self._write_status_response(200, response) def do_GET_cluster(self): cluster = self.server.patroni.dcs.cluster or self.server.patroni.dcs.get_cluster() self._write_json_response(200, cluster_as_json(cluster)) def do_GET_history(self): cluster = self.server.patroni.dcs.cluster or self.server.patroni.dcs.get_cluster() self._write_json_response(200, cluster.history and cluster.history.lines or []) def do_GET_config(self): cluster = self.server.patroni.dcs.cluster or self.server.patroni.dcs.get_cluster() if cluster.config: self._write_json_response(200, cluster.config.data) else: self.send_error(502) def _read_json_content(self, body_is_optional=False): if 'content-length' not in self.headers: return self.send_error(411) if not body_is_optional else {} try: content_length = int(self.headers.get('content-length')) if content_length == 0 and body_is_optional: 
return {} request = json.loads(self.rfile.read(content_length).decode('utf-8')) if isinstance(request, dict) and (request or body_is_optional): return request except Exception: logger.exception('Bad request') self.send_error(400) @check_auth def do_PATCH_config(self): request = self._read_json_content() if request: cluster = self.server.patroni.dcs.get_cluster() data = cluster.config.data.copy() if patch_config(data, request): value = json.dumps(data, separators=(',', ':')) if not self.server.patroni.dcs.set_config_value(value, cluster.config.index): return self.send_error(409) self.server.patroni.ha.wakeup() self._write_json_response(200, data) @check_auth def do_PUT_config(self): request = self._read_json_content() if request: cluster = self.server.patroni.dcs.get_cluster() if not deep_compare(request, cluster.config.data): value = json.dumps(request, separators=(',', ':')) if not self.server.patroni.dcs.set_config_value(value): return self.send_error(502) self._write_json_response(200, request) @check_auth def do_POST_reload(self): self.server.patroni.sighup_handler() self._write_response(202, 'reload scheduled') @staticmethod def parse_schedule(schedule, action): """ parses the given schedule and validates at """ error = None scheduled_at = None try: scheduled_at = dateutil.parser.parse(schedule) if scheduled_at.tzinfo is None: error = 'Timezone information is mandatory for the scheduled {0}'.format(action) status_code = 400 elif scheduled_at < datetime.datetime.now(tzutc): error = 'Cannot schedule {0} in the past'.format(action) status_code = 422 else: status_code = None except (ValueError, TypeError): logger.exception('Invalid scheduled %s time: %s', action, schedule) error = 'Unable to parse scheduled timestamp. It should be in an unambiguous format, e.g. 
ISO 8601' status_code = 422 return (status_code, error, scheduled_at) @check_auth def do_POST_restart(self): status_code = 500 data = 'restart failed' request = self._read_json_content(body_is_optional=True) cluster = self.server.patroni.dcs.get_cluster() if request is None: # failed to parse the json return if request: logger.debug("received restart request: {0}".format(request)) if cluster.is_paused() and 'schedule' in request: self._write_response(status_code, "Can't schedule restart in the paused state") return for k in request: if k == 'schedule': (_, data, request[k]) = self.parse_schedule(request[k], "restart") if _: status_code = _ break elif k == 'role': if request[k] not in ('master', 'replica'): status_code = 400 data = "PostgreSQL role should be either master or replica" break elif k == 'postgres_version': try: postgres_version_to_int(request[k]) except PostgresException as e: status_code = 400 data = e.value break elif k == 'timeout': request[k] = parse_int(request[k], 's') if request[k] is None or request[k] <= 0: status_code = 400 data = "Timeout should be a positive number of seconds" break elif k != 'restart_pending': status_code = 400 data = "Unknown filter for the scheduled restart: {0}".format(k) break else: if 'schedule' not in request: try: status, data = self.server.patroni.ha.restart(request) status_code = 200 if status else 503 except Exception: logger.exception('Exception during restart') status_code = 400 else: if self.server.patroni.ha.schedule_future_restart(request): data = "Restart scheduled" status_code = 202 else: data = "Another restart is already scheduled" status_code = 409 self._write_response(status_code, data) @check_auth def do_DELETE_restart(self): if self.server.patroni.ha.delete_future_restart(): data = "scheduled restart deleted" code = 200 else: data = "no restarts are scheduled" code = 404 self._write_response(code, data) @check_auth def do_POST_reinitialize(self): request = self._read_json_content(body_is_optional=True) if request: logger.debug('received reinitialize request: %s', request) force = isinstance(request, dict) and parse_bool(request.get('force')) or False data = self.server.patroni.ha.reinitialize(force) if data is None: status_code = 200 data = 'reinitialize started' else: status_code = 503 self._write_response(status_code, data) def poll_failover_result(self, leader, candidate, action): timeout = max(10, self.server.patroni.dcs.loop_wait) for _ in range(0, timeout*2): time.sleep(1) try: cluster = self.server.patroni.dcs.get_cluster() if not cluster.is_unlocked() and cluster.leader.name != leader: if not candidate or candidate == cluster.leader.name: return 200, 'Successfully {0}ed over to "{1}"'.format(action[:-4], cluster.leader.name) else: return 200, '{0}ed over to "{1}" instead of "{2}"'.format(action[:-4].title(), cluster.leader.name, candidate) if not cluster.failover: return 503, action.title() + ' failed' except Exception as e: logger.debug('Exception occurred during polling %s result: %s', action, e) return 503, action.title() + ' status unknown' def is_failover_possible(self, cluster, leader, candidate, action): if leader and (not cluster.leader or cluster.leader.name != leader): return 'leader name does not match' if candidate: if action == 'switchover' and cluster.is_synchronous_mode() and cluster.sync.sync_standby != candidate: return 'candidate name does not match with sync_standby' members = [m for m in cluster.members if m.name == candidate] if not members: return 'candidate does not exist' elif
cluster.is_synchronous_mode(): members = [m for m in cluster.members if m.name == cluster.sync.sync_standby] if not members: return action + ' is not possible: can not find sync_standby' else: members = [m for m in cluster.members if m.name != cluster.leader.name and m.api_url] if not members: return action + ' is not possible: cluster does not have members except leader' for st in self.server.patroni.ha.fetch_nodes_statuses(members): if st.failover_limitation() is None: return None return action + ' is not possible: no good candidates have been found' @check_auth def do_POST_failover(self, action='failover'): request = self._read_json_content() (status_code, data) = (400, '') if not request: return leader = request.get('leader') candidate = request.get('candidate') or request.get('member') scheduled_at = request.get('scheduled_at') cluster = self.server.patroni.dcs.get_cluster() logger.info("received %s request with leader=%s candidate=%s scheduled_at=%s", action, leader, candidate, scheduled_at) if action == 'failover' and not candidate: data = 'Failover could be performed only to a specific candidate' elif action == 'switchover' and not leader: data = 'Switchover could be performed only from a specific leader' if not data and scheduled_at: if not leader: data = 'Scheduled {0} is possible only from a specific leader'.format(action) if not data and cluster.is_paused(): data = "Can't schedule {0} in the paused state".format(action) if not data: (status_code, data, scheduled_at) = self.parse_schedule(scheduled_at, action) if not data and cluster.is_paused() and not candidate: data = action.title() + ' is possible only to a specific candidate in a paused state' if not data and not scheduled_at: data = self.is_failover_possible(cluster, leader, candidate, action) if data: status_code = 412 if not data: if self.server.patroni.dcs.manual_failover(leader, candidate, scheduled_at=scheduled_at): self.server.patroni.ha.wakeup() if scheduled_at: data = action.title() + ' scheduled' status_code = 202 else: status_code, data = self.poll_failover_result(cluster.leader and cluster.leader.name, candidate, action) else: data = 'failed to write {0} key into DCS'.format(action) status_code = 503 self._write_response(status_code, data) def do_POST_switchover(self): self.do_POST_failover(action='switchover') def parse_request(self): """Override parse_request method to enrich basic functionality of `BaseHTTPRequestHandler` class Original class can only invoke do_GET, do_POST, do_PUT, etc method implementations if they are defined. 
But we would like to have at least some simple routing mechanism, i.e.: GET /uri1/part2 request should invoke `do_GET_uri1()` POST /other should invoke `do_POST_other()` If the `do_<REQUEST_METHOD>_<first_part_of_path>` method does not exist we'll fall back to the original behavior.""" ret = BaseHTTPRequestHandler.parse_request(self) if ret: mname = self.path.lstrip('/').split('/')[0] mname = self.command + ('_' + mname if mname else '') if hasattr(self, 'do_' + mname): self.command = mname return ret def query(self, sql, *params, **kwargs): if not kwargs.get('retry', False): return self.server.query(sql, *params) retry = Retry(delay=1, retry_exceptions=PostgresConnectionException) return retry(self.server.query, sql, *params) def get_postgresql_status(self, retry=False): try: cluster = self.server.patroni.dcs.cluster if self.server.patroni.postgresql.state not in ('running', 'restarting', 'starting'): raise RetryFailedError('') stmt = ("SELECT pg_catalog.to_char(pg_catalog.pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS.MS TZ')," " CASE WHEN pg_catalog.pg_is_in_recovery() THEN 0" " ELSE ('x' || pg_catalog.substr(pg_catalog.pg_{0}file_name(" "pg_catalog.pg_current_{0}_{1}()), 1, 8))::bit(32)::int END," " CASE WHEN pg_catalog.pg_is_in_recovery() THEN 0" " ELSE pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_current_{0}_{1}(), '0/0')::bigint END," " pg_catalog.pg_{0}_{1}_diff(COALESCE(pg_catalog.pg_last_{0}_receive_{1}()," " pg_catalog.pg_last_{0}_replay_{1}()), '0/0')::bigint," " pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_last_{0}_replay_{1}(), '0/0')::bigint," " pg_catalog.to_char(pg_catalog.pg_last_xact_replay_timestamp(), 'YYYY-MM-DD HH24:MI:SS.MS TZ')," " pg_catalog.pg_is_in_recovery() AND pg_catalog.pg_is_{0}_replay_paused(), " " pg_catalog.array_to_json(pg_catalog.array_agg(pg_catalog.row_to_json(ri))) " "FROM (SELECT (SELECT rolname FROM pg_authid WHERE oid = usesysid) AS usename," " application_name, client_addr, w.state, sync_state, sync_priority" " FROM pg_catalog.pg_stat_get_wal_senders() w, pg_catalog.pg_stat_get_activity(pid)) AS ri") row = self.query(stmt.format(self.server.patroni.postgresql.wal_name, self.server.patroni.postgresql.lsn_name), retry=retry)[0] result = { 'state': self.server.patroni.postgresql.state, 'postmaster_start_time': row[0], 'role': 'replica' if row[1] == 0 else 'master', 'server_version': self.server.patroni.postgresql.server_version, 'cluster_unlocked': bool(not cluster or cluster.is_unlocked()), 'xlog': ({ 'received_location': row[3], 'replayed_location': row[4], 'replayed_timestamp': row[5], 'paused': row[6]} if row[1] == 0 else { 'location': row[2] }) } if result['role'] == 'replica' and self.server.patroni.ha.is_standby_cluster(): result['role'] = self.server.patroni.postgresql.role if row[1] > 0: result['timeline'] = row[1] else: leader_timeline = None if not cluster or cluster.is_unlocked() else cluster.leader.timeline result['timeline'] = self.server.patroni.postgresql.replica_cached_timeline(leader_timeline) if row[7]: result['replication'] = row[7] return result except (psycopg2.Error, RetryFailedError, PostgresConnectionException): state = self.server.patroni.postgresql.state if state == 'running': logger.exception('get_postgresql_status') state = 'unknown' return {'state': state, 'role': self.server.patroni.postgresql.role} def log_message(self, fmt, *args): logger.debug("API thread: %s - - [%s] %s", self.client_address[0], self.log_date_time_string(), fmt % args)
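The routing in parse_request above boils down to deriving a handler name from the HTTP verb and the first path segment; a self-contained sketch of that rule (the function name here is illustrative, not part of the module):

def handler_name(command, path):
    # 'GET' + '/uri1/part2' -> 'do_GET_uri1'; bare '/' falls back to plain 'do_GET'
    first = path.lstrip('/').split('/')[0]
    return 'do_' + command + ('_' + first if first else '')

assert handler_name('GET', '/uri1/part2') == 'do_GET_uri1'
assert handler_name('POST', '/other') == 'do_POST_other'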
class RestApiServer(ThreadingMixIn, HTTPServer, Thread): # On 3.7+ the `ThreadingMixIn` gathers all non-daemon worker threads in order to join on them at server close. daemon_threads = True # Make worker threads "fire and forget" to prevent a memory leak. def __init__(self, patroni, config): self.patroni = patroni self.__listen = None self.__ssl_options = None self.reload_config(config) self.daemon = True def query(self, sql, *params): cursor = None try: with self.patroni.postgresql.connection().cursor() as cursor: cursor.execute(sql, params) return [r for r in cursor] except psycopg2.Error as e: if cursor and cursor.connection.closed == 0: raise e raise PostgresConnectionException('connection problems') @staticmethod def _set_fd_cloexec(fd): if os.name != 'nt': import fcntl flags = fcntl.fcntl(fd, fcntl.F_GETFD) fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) def check_basic_auth_key(self, key): return self.__auth_key == key def check_auth_header(self, auth_header): if self.__auth_key: if auth_header is None: return 'no auth header received' if not auth_header.startswith('Basic ') or not self.check_basic_auth_key(auth_header[6:]): return 'not authenticated' def check_auth(self, rh): if not hasattr(rh.request, 'getpeercert') or not rh.request.getpeercert(): # valid client cert isn't present if self.__protocol == 'https' and self.__ssl_options.get('verify_client') in ('required', 'optional'): return rh._write_response(403, 'client certificate required') reason = self.check_auth_header(rh.headers.get('Authorization')) if reason: headers = {'WWW-Authenticate': 'Basic realm="' + self.patroni.__class__.__name__ + '"'} return rh._write_response(401, reason, headers=headers) return True @staticmethod def __has_dual_stack(): if hasattr(socket, 'AF_INET6') and hasattr(socket, 'IPPROTO_IPV6') and hasattr(socket, 'IPV6_V6ONLY'): sock = None try: sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, False) return True except socket.error as e: logger.debug('Error when working with ipv6 socket: %s', e) finally: if sock: sock.close() return False def __httpserver_init(self, host, port): dual_stack = self.__has_dual_stack() if host in ('', '*'): host = None info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE) # in case dual stack is not supported we want IPv4 to be preferred over IPv6 info.sort(key=lambda x: x[0] == socket.AF_INET, reverse=not dual_stack) self.address_family = info[0][0] try: HTTPServer.__init__(self, info[0][-1][:2], RestApiHandler) except socket.error: logger.error( "Couldn't start a service on '%s:%s', please check your `restapi.listen` configuration", host, port) raise def __initialize(self, listen, ssl_options): try: host, port = split_host_port(listen, None) except Exception: raise ValueError('Invalid "restapi" config: expected <HOST>:<PORT> for "listen", but got "{0}"' .format(listen)) reloading_config = self.__listen is not None # changing config in runtime if reloading_config: self.shutdown() self.__listen = listen self.__ssl_options = ssl_options self.__httpserver_init(host, port) Thread.__init__(self, target=self.serve_forever) self._set_fd_cloexec(self.socket) # wrap socket with ssl if 'certfile' is defined in a config.yaml # Sometimes it's also needed to pass a reference to a 'keyfile'.
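(For reference, a client of a TLS-enabled REST API verifies against the same CA; a sketch where the certificate path and address are assumptions, not values taken from this file.)

import ssl
import urllib.request

ctx = ssl.create_default_context(cafile='/path/to/ca.crt')  # hypothetical CA file
resp = urllib.request.urlopen('https://127.0.0.1:8008/health', context=ctx, timeout=5)
print(resp.getcode())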
self.__protocol = 'https' if ssl_options.get('certfile') else 'http' if self.__protocol == 'https': import ssl ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=ssl_options.get('cafile')) ctx.load_cert_chain(certfile=ssl_options['certfile'], keyfile=ssl_options.get('keyfile')) verify_client = ssl_options.get('verify_client') if verify_client: modes = {'none': ssl.CERT_NONE, 'optional': ssl.CERT_OPTIONAL, 'required': ssl.CERT_REQUIRED} if verify_client in modes: ctx.verify_mode = modes[verify_client] else: logger.error('Bad value in the "restapi.verify_client": %s', verify_client) self.socket = ctx.wrap_socket(self.socket, server_side=True) if reloading_config: self.start() def reload_config(self, config): if 'listen' not in config: # changing config in runtime raise ValueError('Can not find "restapi.listen" config') ssl_options = {n: config[n] for n in ('certfile', 'keyfile', 'cafile') if n in config} if isinstance(config.get('verify_client'), six.string_types): ssl_options['verify_client'] = config['verify_client'].lower() if self.__listen != config['listen'] or self.__ssl_options != ssl_options: self.__initialize(config['listen'], ssl_options) self.__auth_key = base64.b64encode(config['auth'].encode('utf-8')).decode('utf-8') if 'auth' in config else None self.connection_string = uri(self.__protocol, config.get('connect_address') or self.__listen, 'patroni') @staticmethod def handle_error(request, client_address): address, port = client_address logger.warning('Exception happened during processing of request from {}:{}'.format(address, port)) logger.warning(traceback.format_exc()) patroni-1.6.4/patroni/async_executor.py000066400000000000000000000110651361356115100202640ustar00rootroot00000000000000import logging from threading import Event, Lock, RLock, Thread logger = logging.getLogger(__name__) class CriticalTask(object): """Represents a critical task in a background process that we either need to cancel or get the result of. Fields of this object may be accessed only when holding a lock on it. To perform the critical task the background thread must, while holding lock on this object, check `is_cancelled` flag, run the task and mark the task as complete using `complete()`. The main thread must hold async lock to prevent the task from completing, hold lock on critical task object, call cancel. If the task has completed `cancel()` will return False and `result` field will contain the result of the task. When cancel returns True it is guaranteed that the background task will notice the `is_cancelled` flag. """ def __init__(self): self._lock = Lock() self.is_cancelled = False self.result = None def reset(self): """Must be called every time the background task is finished. Must be called from async thread. Caller must hold lock on async executor when calling.""" self.is_cancelled = False self.result = None def cancel(self): """Tries to cancel the task, returns True if the task has already run. Caller must hold lock on async executor and the task when calling.""" if self.result is not None: return False self.is_cancelled = True return True def complete(self, result): """Mark task as completed along with a result. Must be called from async thread. 
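(A compressed sketch of the cancel/complete handshake these docstrings describe; locking is simplified relative to the documented rules and the values are illustrative.)

task = CriticalTask()
# Background thread, holding the task lock, completes the work:
with task:
    if not task.is_cancelled:
        task.complete('some result')
# Main thread then tries to cancel, too late:
with task:
    print(task.cancel())  # False: already completed, task.result is kept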
Caller must hold lock on task when calling.""" self.result = result def __enter__(self): self._lock.acquire() return self def __exit__(self, exc_type, exc_val, exc_tb): self._lock.release() class AsyncExecutor(object): def __init__(self, cancellable, ha_wakeup): self._cancellable = cancellable self._ha_wakeup = ha_wakeup self._thread_lock = RLock() self._scheduled_action = None self._scheduled_action_lock = RLock() self._is_cancelled = False self._finish_event = Event() self.critical_task = CriticalTask() @property def busy(self): return self.scheduled_action is not None def schedule(self, action): with self._scheduled_action_lock: if self._scheduled_action is not None: return self._scheduled_action self._scheduled_action = action self._is_cancelled = False self._finish_event.set() return None @property def scheduled_action(self): with self._scheduled_action_lock: return self._scheduled_action def reset_scheduled_action(self): with self._scheduled_action_lock: self._scheduled_action = None def run(self, func, args=()): wakeup = False try: with self: if self._is_cancelled: return self._finish_event.clear() self._cancellable.reset_is_cancelled() # if the func returned something (not None) - wake up main HA loop wakeup = func(*args) if args else func() return wakeup except Exception: logger.exception('Exception during execution of long running task %s', self.scheduled_action) finally: with self: self.reset_scheduled_action() self._finish_event.set() with self.critical_task: self.critical_task.reset() if wakeup is not None: self._ha_wakeup() def run_async(self, func, args=()): Thread(target=self.run, args=(func, args)).start() def try_run_async(self, action, func, args=()): prev = self.schedule(action) if prev is None: return self.run_async(func, args) return 'Failed to run {0}, {1} is already in progress'.format(action, prev) def cancel(self): with self: with self._scheduled_action_lock: if self._scheduled_action is None: return logger.warning('Cancelling long running task %s', self._scheduled_action) self._is_cancelled = True self._cancellable.cancel() self._finish_event.wait() with self: self.reset_scheduled_action() def __enter__(self): self._thread_lock.acquire() def __exit__(self, *args): self._thread_lock.release() patroni-1.6.4/patroni/config.py000066400000000000000000000412461361356115100165020ustar00rootroot00000000000000import json import logging import os import shutil import tempfile import yaml from collections import defaultdict from copy import deepcopy from patroni import PATRONI_ENV_PREFIX from patroni.exceptions import ConfigParseError from patroni.dcs import ClusterConfig from patroni.postgresql.config import CaseInsensitiveDict, ConfigHandler from patroni.utils import deep_compare, parse_bool, parse_int, patch_config logger = logging.getLogger(__name__) _AUTH_ALLOWED_PARAMETERS = ( 'username', 'password', 'sslmode', 'sslcert', 'sslkey', 'sslrootcert', 'sslcrl' ) def default_validator(conf): if not conf: return "Config is empty." class Config(object): """ This class is responsible for: 1) Building and giving access to `effective_configuration` from: * `Config.__DEFAULT_CONFIG` -- some sane default values * `dynamic_configuration` -- configuration stored in DCS * `local_configuration` -- configuration from `config.yml` or environment 2) Saving and loading `dynamic_configuration` into 'patroni.dynamic.json' file located in local_configuration['postgresql']['data_dir'] directory. 
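For example, the cached copy can be inspected directly with the standard library (a sketch; the data_dir path is an assumption):

import json
import os

# Hypothetical data_dir; Patroni writes the cache next to the PostgreSQL data.
cache = os.path.join('/home/postgres/pgdata/pgroot/data', 'patroni.dynamic.json')
if os.path.isfile(cache):
    with open(cache) as f:
        print(json.load(f).get('loop_wait'))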
This is necessary to be able to restore `dynamic_configuration` if DCS was accidentally wiped 3) Loading of configuration file in the old format and converting it into new format 4) Mimicking some of the `dict` interfaces to make it possible to work with it as with the old `config` object. """ PATRONI_CONFIG_VARIABLE = PATRONI_ENV_PREFIX + 'CONFIGURATION' __CACHE_FILENAME = 'patroni.dynamic.json' __DEFAULT_CONFIG = { 'ttl': 30, 'loop_wait': 10, 'retry_timeout': 10, 'maximum_lag_on_failover': 1048576, 'check_timeline': False, 'master_start_timeout': 300, 'synchronous_mode': False, 'synchronous_mode_strict': False, 'standby_cluster': { 'create_replica_methods': '', 'host': '', 'port': '', 'primary_slot_name': '', 'restore_command': '', 'archive_cleanup_command': '', 'recovery_min_apply_delay': '' }, 'postgresql': { 'bin_dir': '', 'use_slots': True, 'parameters': CaseInsensitiveDict({p: v[0] for p, v in ConfigHandler.CMDLINE_OPTIONS.items()}) }, 'watchdog': { 'mode': 'automatic', } } def __init__(self, configfile, validator=default_validator): self._modify_index = -1 self._dynamic_configuration = {} self.__environment_configuration = self._build_environment_configuration() # Patroni reads the configuration from the command-line argument if it exists, otherwise from the environment self._config_file = configfile and os.path.isfile(configfile) and configfile if self._config_file: self._local_configuration = self._load_config_file() else: config_env = os.environ.pop(self.PATRONI_CONFIG_VARIABLE, None) self._local_configuration = config_env and yaml.safe_load(config_env) or self.__environment_configuration if validator: error = validator(self._local_configuration) if error: raise ConfigParseError(error) self.__effective_configuration = self._build_effective_configuration({}, self._local_configuration) self._data_dir = self.__effective_configuration.get('postgresql', {}).get('data_dir', "") self._cache_file = os.path.join(self._data_dir, self.__CACHE_FILENAME) self._load_cache() self._cache_needs_saving = False @property def config_file(self): return self._config_file @property def dynamic_configuration(self): return deepcopy(self._dynamic_configuration) def check_mode(self, mode): return bool(parse_bool(self._dynamic_configuration.get(mode))) def _load_config_file(self): """Loads config.yaml from filesystem and applies some values which were set via ENV""" with open(self._config_file) as f: config = yaml.safe_load(f) patch_config(config, self.__environment_configuration) return config def _load_cache(self): if os.path.isfile(self._cache_file): try: with open(self._cache_file) as f: self.set_dynamic_configuration(json.load(f)) except Exception: logger.exception('Exception when loading file: %s', self._cache_file) def save_cache(self): if self._cache_needs_saving: tmpfile = fd = None try: (fd, tmpfile) = tempfile.mkstemp(prefix=self.__CACHE_FILENAME, dir=self._data_dir) with os.fdopen(fd, 'w') as f: fd = None json.dump(self.dynamic_configuration, f) tmpfile = shutil.move(tmpfile, self._cache_file) self._cache_needs_saving = False except Exception: logger.exception('Exception when saving file: %s', self._cache_file) if fd: try: os.close(fd) except Exception: logger.error('Can not close temporary file %s', tmpfile) if tmpfile and os.path.exists(tmpfile): try: os.remove(tmpfile) except Exception: logger.error('Can not remove temporary file %s', tmpfile) # configuration could be either ClusterConfig or dict def set_dynamic_configuration(self, configuration): if isinstance(configuration, ClusterConfig): 
if self._modify_index == configuration.modify_index: return False # If the index didn't change there is nothing to do self._modify_index = configuration.modify_index configuration = configuration.data if not deep_compare(self._dynamic_configuration, configuration): try: self.__effective_configuration = self._build_effective_configuration(configuration, self._local_configuration) self._dynamic_configuration = configuration self._cache_needs_saving = True return True except Exception: logger.exception('Exception when setting dynamic_configuration') def reload_local_configuration(self): if self.config_file: try: configuration = self._load_config_file() if not deep_compare(self._local_configuration, configuration): new_configuration = self._build_effective_configuration(self._dynamic_configuration, configuration) self._local_configuration = configuration self.__effective_configuration = new_configuration return True else: logger.info('No local configuration items changed.') except Exception: logger.exception('Exception when reloading local configuration from %s', self.config_file) @staticmethod def _process_postgresql_parameters(parameters, is_local=False): return {name: value for name, value in (parameters or {}).items() if name not in ConfigHandler.CMDLINE_OPTIONS or not is_local and ConfigHandler.CMDLINE_OPTIONS[name][1](value)} def _safe_copy_dynamic_configuration(self, dynamic_configuration): config = deepcopy(self.__DEFAULT_CONFIG) for name, value in dynamic_configuration.items(): if name == 'postgresql': for name, value in (value or {}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value)) elif name not in ('connect_address', 'listen', 'data_dir', 'pgpass', 'authentication'): config['postgresql'][name] = deepcopy(value) elif name == 'standby_cluster': for name, value in (value or {}).items(): if name in self.__DEFAULT_CONFIG['standby_cluster']: config['standby_cluster'][name] = deepcopy(value) elif name in config: # only variables present in __DEFAULT_CONFIG are allowed to be overridden from DCS if name in ('synchronous_mode', 'synchronous_mode_strict'): config[name] = value else: config[name] = int(value) return config @staticmethod def _build_environment_configuration(): ret = defaultdict(dict) def _popenv(name): return os.environ.pop(PATRONI_ENV_PREFIX + name.upper(), None) for param in ('name', 'namespace', 'scope'): value = _popenv(param) if value: ret[param] = value def _fix_log_env(name, oldname): value = _popenv(oldname) name = PATRONI_ENV_PREFIX + 'LOG_' + name.upper() if value and name not in os.environ: os.environ[name] = value for name, oldname in (('level', 'loglevel'), ('format', 'logformat'), ('dateformat', 'log_datefmt')): _fix_log_env(name, oldname) def _set_section_values(section, params): for param in params: value = _popenv(section + '_' + param) if value: ret[section][param] = value _set_section_values('restapi', ['listen', 'connect_address', 'certfile', 'keyfile', 'cafile', 'verify_client']) _set_section_values('ctl', ['insecure', 'cacert', 'certfile', 'keyfile']) _set_section_values('postgresql', ['listen', 'connect_address', 'config_dir', 'data_dir', 'pgpass', 'bin_dir']) _set_section_values('log', ['level', 'traceback_level', 'format', 'dateformat', 'max_queue_size', 'dir', 'file_size', 'file_num', 'loggers']) def _parse_dict(value): if not value.strip().startswith('{'): value = '{{{0}}}'.format(value) try: return yaml.safe_load(value) except Exception: logger.exception('Exception when parsing dict %s',
value) return None value = ret.get('log', {}).pop('loggers', None) if value: value = _parse_dict(value) if value: ret['log']['loggers'] = value def _get_auth(name, params=None): ret = {} for param in params or _AUTH_ALLOWED_PARAMETERS[:2]: value = _popenv(name + '_' + param) if value: ret[param] = value return ret restapi_auth = _get_auth('restapi') if restapi_auth: ret['restapi']['authentication'] = restapi_auth authentication = {} for user_type in ('replication', 'superuser', 'rewind'): entry = _get_auth(user_type, _AUTH_ALLOWED_PARAMETERS) if entry: authentication[user_type] = entry if authentication: ret['postgresql']['authentication'] = authentication def _parse_list(value): if not (value.strip().startswith('-') or '[' in value): value = '[{0}]'.format(value) try: return yaml.safe_load(value) except Exception: logger.exception('Exception when parsing list %s', value) return None for param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): # PATRONI_(ETCD|CONSUL|ZOOKEEPER|EXHIBITOR|...)_(HOSTS?|PORT|..) name, suffix = (param[8:].split('_', 1) + [''])[:2] if suffix in ('HOST', 'HOSTS', 'PORT', 'USE_PROXIES', 'PROTOCOL', 'SRV', 'URL', 'PROXY', 'CACERT', 'CERT', 'KEY', 'VERIFY', 'TOKEN', 'CHECKS', 'DC', 'CONSISTENCY', 'REGISTER_SERVICE', 'SERVICE_CHECK_INTERVAL', 'NAMESPACE', 'CONTEXT', 'USE_ENDPOINTS', 'SCOPE_LABEL', 'ROLE_LABEL', 'POD_IP', 'PORTS', 'LABELS') and name: value = os.environ.pop(param) if suffix == 'PORT': value = value and parse_int(value) elif suffix in ('HOSTS', 'PORTS', 'CHECKS'): value = value and _parse_list(value) elif suffix == 'LABELS': value = _parse_dict(value) elif suffix in ('USE_PROXIES', 'REGISTER_SERVICE'): value = parse_bool(value) if value: ret[name.lower()][suffix.lower()] = value if 'etcd' in ret: ret['etcd'].update(_get_auth('etcd')) users = {} for param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): name, suffix = (param[8:].rsplit('_', 1) + [''])[:2] # PATRONI_<username>_PASSWORD=<password>, PATRONI_<username>_OPTIONS=<options> # CREATE USER "<username>" WITH <options> PASSWORD '<password>' if name and suffix == 'PASSWORD': password = os.environ.pop(param) if password: users[name] = {'password': password} options = os.environ.pop(param[:-9] + '_OPTIONS', None) options = options and _parse_list(options) if options: users[name]['options'] = options if users: ret['bootstrap']['users'] = users return ret def _build_effective_configuration(self, dynamic_configuration, local_configuration): config = self._safe_copy_dynamic_configuration(dynamic_configuration) for name, value in local_configuration.items(): if name == 'postgresql': for name, value in (value or {}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value, True)) elif name != 'use_slots': # replication slots must be enabled/disabled globally config['postgresql'][name] = deepcopy(value) elif name not in config or name in ['watchdog']: config[name] = deepcopy(value) if value else {} # restapi server expects to get restapi.auth = 'username:password' if 'restapi' in config and 'authentication' in config['restapi']: config['restapi']['auth'] = '{username}:{password}'.format(**config['restapi']['authentication']) # special treatment for old config # 'exhibitor' inside 'zookeeper': if 'zookeeper' in config and 'exhibitor' in config['zookeeper']: config['exhibitor'] = config['zookeeper'].pop('exhibitor') config.pop('zookeeper') pg_config = config['postgresql'] # no 'authentication' in 'postgresql', but 'replication' and 'superuser' if 'authentication' not in pg_config:
pg_config['use_pg_rewind'] = 'pg_rewind' in pg_config pg_config['authentication'] = {u: pg_config[u] for u in ('replication', 'superuser') if u in pg_config} # no 'superuser' in 'postgresql'.'authentication' if 'superuser' not in pg_config['authentication'] and 'pg_rewind' in pg_config: pg_config['authentication']['superuser'] = pg_config['pg_rewind'] # handle setting additional connection parameters that may be available # in the configuration file, such as SSL connection parameters for name, value in pg_config['authentication'].items(): pg_config['authentication'][name] = {n: v for n, v in value.items() if n in _AUTH_ALLOWED_PARAMETERS} # no 'name' in config if 'name' not in config and 'name' in pg_config: config['name'] = pg_config['name'] updated_fields = ( 'name', 'scope', 'retry_timeout', 'synchronous_mode', 'synchronous_mode_strict', ) pg_config.update({p: config[p] for p in updated_fields if p in config}) return config def get(self, key, default=None): return self.__effective_configuration.get(key, default) def __contains__(self, key): return key in self.__effective_configuration def __getitem__(self, key): return self.__effective_configuration[key] def copy(self): return deepcopy(self.__effective_configuration) patroni-1.6.4/patroni/ctl.py000066400000000000000000001354221361356115100160170ustar00rootroot00000000000000''' Patroni Control ''' import click import codecs import datetime import dateutil.parser import cdiff import copy import difflib import io import json import logging import os import random import six import subprocess import sys import tempfile import time import tzlocal import yaml from click import ClickException from contextlib import contextmanager from patroni.dcs import get_dcs as _get_dcs from patroni.exceptions import PatroniException from patroni.postgresql import Postgresql from patroni.postgresql.misc import postgres_version_to_int from patroni.utils import cluster_as_json, patch_config, polling_loop from patroni.request import PatroniRequest from patroni.version import __version__ from prettytable import PrettyTable from six.moves.urllib_parse import urlparse CONFIG_DIR_PATH = click.get_app_dir('patroni') CONFIG_FILE_PATH = os.path.join(CONFIG_DIR_PATH, 'patronictl.yaml') DCS_DEFAULTS = {'zookeeper': {'port': 2181, 'template': "zookeeper:\n hosts: ['{host}:{port}']"}, 'exhibitor': {'port': 8181, 'template': "exhibitor:\n hosts: [{host}]\n port: {port}"}, 'consul': {'port': 8500, 'template': "consul:\n host: '{host}:{port}'"}, 'etcd': {'port': 2379, 'template': "etcd:\n host: '{host}:{port}'"}} class PatroniCtlException(ClickException): pass def parse_dcs(dcs): if dcs is None: return None elif '//' not in dcs: dcs = '//' + dcs parsed = urlparse(dcs) scheme = parsed.scheme port = int(parsed.port) if parsed.port else None if scheme == '': scheme = ([k for k, v in DCS_DEFAULTS.items() if v['port'] == port] or ['etcd'])[0] elif scheme not in DCS_DEFAULTS: raise PatroniCtlException('Unknown dcs scheme: {}'.format(scheme)) default = DCS_DEFAULTS[scheme] return yaml.safe_load(default['template'].format(host=parsed.hostname or 'localhost', port=port or default['port'])) def load_config(path, dcs): from patroni.config import Config if not (os.path.exists(path) and os.access(path, os.R_OK)): logging.debug('Ignoring configuration file "%s". 
It does not exist or is not readable.', path) else: logging.debug('Loading configuration from file %s', path) config = Config(path, validator=None).copy() dcs = parse_dcs(dcs) or parse_dcs(config.get('dcs_api')) or {} if dcs: for d in DCS_DEFAULTS: config.pop(d, None) config.update(dcs) return config def store_config(config, path): dir_path = os.path.dirname(path) if dir_path and not os.path.isdir(dir_path): os.makedirs(dir_path) with open(path, 'w') as fd: yaml.dump(config, fd) option_format = click.option('--format', '-f', 'fmt', help='Output format (pretty, json, yaml)', default='pretty') option_watchrefresh = click.option('-w', '--watch', type=float, help='Auto update the screen every X seconds') option_watch = click.option('-W', is_flag=True, help='Auto update the screen every 2 seconds') option_force = click.option('--force', is_flag=True, help='Do not ask for confirmation at any point') arg_cluster_name = click.argument('cluster_name', required=False, default=lambda: click.get_current_context().obj.get('scope')) option_insecure = click.option('-k', '--insecure', is_flag=True, help='Allow connections to SSL sites without certs') @click.group() @click.option('--config-file', '-c', help='Configuration file', envvar='PATRONICTL_CONFIG_FILE', default=CONFIG_FILE_PATH) @click.option('--dcs', '-d', help='Use this DCS', envvar='DCS') @option_insecure @click.pass_context def ctl(ctx, config_file, dcs, insecure): level = 'WARNING' for name in ('LOGLEVEL', 'PATRONI_LOGLEVEL', 'PATRONI_LOG_LEVEL'): level = os.environ.get(name, level) logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=level) logging.captureWarnings(True) # Capture any SSL warning ctx.obj = load_config(config_file, dcs) # backward compatibility for configuration file where the ctl section is not defined ctx.obj.setdefault('ctl', {})['insecure'] = ctx.obj.get('ctl', {}).get('insecure') or insecure def get_dcs(config, scope): config.update({'scope': scope, 'patronictl': True}) config.setdefault('name', scope) try: return _get_dcs(config) except PatroniException as e: raise PatroniCtlException(str(e)) def request_patroni(member, method='GET', endpoint=None, data=None): ctx = click.get_current_context() # the current click context request_executor = ctx.obj.get('__request_patroni') if not request_executor: request_executor = ctx.obj['__request_patroni'] = PatroniRequest(ctx.obj) return request_executor(member, method, endpoint, data) def print_output(columns, rows=None, alignment=None, fmt='pretty', header=True, delimiter='\t'): rows = rows or [] if fmt == 'pretty': t = PrettyTable(columns) for k, v in (alignment or {}).items(): t.align[k] = v for r in rows: t.add_row(r) click.echo(t) return if fmt in ['json', 'yaml', 'yml']: elements = [dict(zip(columns, r)) for r in rows] if fmt == 'json': click.echo(json.dumps(elements)) elif fmt in ('yaml', 'yml'): click.echo(yaml.safe_dump(elements, encoding=None, default_flow_style=False, allow_unicode=True, width=200)) if fmt == 'tsv': if columns is not None and header: click.echo(delimiter.join(columns)) for r in rows: c = [str(c) for c in r] click.echo(delimiter.join(c)) def watching(w, watch, max_count=None, clear=True): """ >>> len(list(watching(True, 1, 0))) 1 >>> len(list(watching(True, 1, 1))) 2 >>> len(list(watching(True, None, 0))) 1 """ if w and not watch: watch = 2 if watch and clear: click.clear() yield 0 if max_count is not None and max_count < 1: return counter = 1 while watch and counter <= (max_count or counter): time.sleep(watch) counter += 1 if
clear: click.clear() yield 0 def get_all_members(cluster, role='master'): if role == 'master': if cluster.leader is not None: yield cluster.leader return leader_name = (cluster.leader.member.name if cluster.leader else None) for m in cluster.members: if role == 'any' or role == 'replica' and m.name != leader_name: yield m def get_any_member(cluster, role='master', member=None): members = get_all_members(cluster, role) for m in members: if member is None or m.name == member: return m def get_cursor(cluster, connect_parameters, role='master', member=None): member = get_any_member(cluster, role=role, member=member) if member is None: return None params = member.conn_kwargs(connect_parameters) params.update({'fallback_application_name': 'Patroni ctl', 'connect_timeout': '5'}) if 'database' in connect_parameters: params['database'] = connect_parameters['database'] else: params.pop('database') import psycopg2 conn = psycopg2.connect(**params) conn.autocommit = True cursor = conn.cursor() if role == 'any': return cursor cursor.execute('SELECT pg_catalog.pg_is_in_recovery()') in_recovery = cursor.fetchone()[0] if in_recovery and role == 'replica' or not in_recovery and role == 'master': return cursor conn.close() return None def get_members(cluster, cluster_name, member_names, role, force, action, ask_confirmation=True): candidates = {m.name: m for m in cluster.members} if not force or role: if not member_names and not candidates: raise PatroniCtlException('{0} cluster doesn\'t have any members'.format(cluster_name)) output_members(cluster, cluster_name) if role: role_names = [m.name for m in get_all_members(cluster, role)] if member_names: member_names = list(set(member_names) & set(role_names)) if not member_names: raise PatroniCtlException('No {0} among provided members'.format(role)) else: member_names = role_names if not member_names and not force: member_names = [click.prompt('Which member do you want to {0} [{1}]?'.format(action, ', '.join(candidates.keys())), type=str, default='')] for member_name in member_names: if member_name not in candidates: raise PatroniCtlException('{0} is not a member of cluster'.format(member_name)) members = [candidates[n] for n in member_names] if ask_confirmation: confirm_members_action(members, force, action) return members def confirm_members_action(members, force, action, scheduled_at=None): if scheduled_at: if not force: confirm = click.confirm('Are you sure you want to schedule {0} of members {1} at {2}?' .format(action, ', '.join([m.name for m in members]), scheduled_at)) if not confirm: raise PatroniCtlException('Aborted scheduled {0}'.format(action)) else: if not force: confirm = click.confirm('Are you sure you want to {0} members {1}?' 
.format(action, ', '.join([m.name for m in members]))) if not confirm: raise PatroniCtlException('Aborted {0}'.format(action)) @ctl.command('dsn', help='Generate a dsn for the provided member, defaults to a dsn of the master') @click.option('--role', '-r', help='Give a dsn of any member with this role', type=click.Choice(['master', 'replica', 'any']), default=None) @click.option('--member', '-m', help='Generate a dsn for this member', type=str) @arg_cluster_name @click.pass_obj def dsn(obj, cluster_name, role, member): if role is not None and member is not None: raise PatroniCtlException('--role and --member are mutually exclusive options') if member is None and role is None: role = 'master' cluster = get_dcs(obj, cluster_name).get_cluster() m = get_any_member(cluster, role=role, member=member) if m is None: raise PatroniCtlException('Can not find a suitable member') params = m.conn_kwargs() click.echo('host={host} port={port}'.format(**params)) @ctl.command('query', help='Query a Patroni PostgreSQL member') @arg_cluster_name @option_format @click.option('--format', 'fmt', help='Output format (pretty, json)', default='tsv') @click.option('--file', '-f', 'p_file', help='Execute the SQL commands from this file', type=click.File('rb')) @click.option('--password', help='force password prompt', is_flag=True) @click.option('-U', '--username', help='database user name', type=str) @option_watch @option_watchrefresh @click.option('--role', '-r', help='The role of the query', type=click.Choice(['master', 'replica', 'any']), default=None) @click.option('--member', '-m', help='Query a specific member', type=str) @click.option('--delimiter', help='The column delimiter', default='\t') @click.option('--command', '-c', help='The SQL commands to execute') @click.option('-d', '--dbname', help='database name to connect to', type=str) @click.pass_obj def query( obj, cluster_name, role, member, w, watch, delimiter, command, p_file, password, username, dbname, fmt='tsv', ): if role is not None and member is not None: raise PatroniCtlException('--role and --member are mutually exclusive options') if member is None and role is None: role = 'master' if p_file is not None and command is not None: raise PatroniCtlException('--file and --command are mutually exclusive options') if p_file is None and command is None: raise PatroniCtlException('You need to specify either --command or --file') connect_parameters = {} if username: connect_parameters['username'] = username if password: connect_parameters['password'] = click.prompt('Password', hide_input=True, type=str) if dbname: connect_parameters['database'] = dbname if p_file is not None: command = p_file.read() dcs = get_dcs(obj, cluster_name) cursor = None for _ in watching(w, watch, clear=False): if cursor is None: cluster = dcs.get_cluster() output, cursor = query_member(cluster, cursor, member, role, command, connect_parameters) print_output(None, output, fmt=fmt, delimiter=delimiter) def query_member(cluster, cursor, member, role, command, connect_parameters): import psycopg2 try: if cursor is None: cursor = get_cursor(cluster, connect_parameters, role=role, member=member) if cursor is None: if role is None: message = 'No connection to member {0} is available'.format(member) else: message = 'No connection to role={0} is available'.format(role) logging.debug(message) return [[timestamp(0), message]], None cursor.execute('SELECT pg_catalog.pg_is_in_recovery()') in_recovery = cursor.fetchone()[0] if in_recovery and role == 'master' or not in_recovery and role == 
'replica': cursor.connection.close() return None, None cursor.execute(command) return cursor.fetchall(), cursor except (psycopg2.OperationalError, psycopg2.DatabaseError) as oe: logging.debug(oe) if cursor is not None and not cursor.connection.closed: cursor.connection.close() message = oe.pgcode or oe.pgerror or str(oe) message = message.replace('\n', ' ') return [[timestamp(0), 'ERROR, SQLSTATE: {0}'.format(message)]], None @ctl.command('remove', help='Remove cluster from DCS') @click.argument('cluster_name') @option_format @click.pass_obj def remove(obj, cluster_name, fmt): dcs = get_dcs(obj, cluster_name) cluster = dcs.get_cluster() output_members(cluster, cluster_name, fmt=fmt) confirm = click.prompt('Please confirm the cluster name to remove', type=str) if confirm != cluster_name: raise PatroniCtlException('Cluster names specified do not match') message = 'Yes I am aware' confirm = \ click.prompt('You are about to remove all information in DCS for {0}, please type: "{1}"'.format(cluster_name, message), type=str) if message != confirm: raise PatroniCtlException('You did not exactly type "{0}"'.format(message)) if cluster.leader and cluster.leader.name: confirm = click.prompt('This cluster currently is healthy. Please specify the master name to continue') if confirm != cluster.leader.name: raise PatroniCtlException('You did not specify the current master of the cluster') dcs.delete_cluster() def check_response(response, member_name, action_name, silent_success=False): if response.status >= 400: click.echo('Failed: {0} for member {1}, status code={2}, ({3})'.format( action_name, member_name, response.status, response.data.decode('utf-8') )) return False elif not silent_success: click.echo('Success: {0} for member {1}'.format(action_name, member_name)) return True def parse_scheduled(scheduled): if (scheduled or 'now') != 'now': try: scheduled_at = dateutil.parser.parse(scheduled) if scheduled_at.tzinfo is None: scheduled_at = tzlocal.get_localzone().localize(scheduled_at) except (ValueError, TypeError): message = 'Unable to parse scheduled timestamp ({0}). It should be in an unambiguous format (e.g. 
ISO 8601)' raise PatroniCtlException(message.format(scheduled)) return scheduled_at return None @ctl.command('reload', help='Reload cluster member configuration') @click.argument('cluster_name') @click.argument('member_names', nargs=-1) @click.option('--role', '-r', help='Reload only members with this role', default='any', type=click.Choice(['master', 'replica', 'any'])) @option_force @click.pass_obj def reload(obj, cluster_name, member_names, force, role): cluster = get_dcs(obj, cluster_name).get_cluster() members = get_members(cluster, cluster_name, member_names, role, force, 'reload') for member in members: r = request_patroni(member, 'post', 'reload') if r.status == 200: click.echo('No changes to apply on member {0}'.format(member.name)) elif r.status == 202: click.echo('Reload request received for member {0} and will be processed within {1} seconds'.format( member.name, cluster.config.data.get('loop_wait')) ) else: click.echo('Failed: reload for member {0}, status code={1}, ({2})'.format( member.name, r.status, r.data.decode('utf-8')) ) @ctl.command('restart', help='Restart cluster member') @click.argument('cluster_name') @click.argument('member_names', nargs=-1) @click.option('--role', '-r', help='Restart only members with this role', default='any', type=click.Choice(['master', 'replica', 'any'])) @click.option('--any', 'p_any', help='Restart a single member only', is_flag=True) @click.option('--scheduled', help='Timestamp of a scheduled restart in unambiguous format (e.g. ISO 8601)', default=None) @click.option('--pg-version', 'version', help='Restart if the PostgreSQL version is less than provided (e.g. 9.5.2)', default=None) @click.option('--pending', help='Restart if pending', is_flag=True) @click.option('--timeout', help='Return error and fail over if necessary when restarting takes longer than this.') @option_force @click.pass_obj def restart(obj, cluster_name, member_names, force, role, p_any, scheduled, version, pending, timeout): cluster = get_dcs(obj, cluster_name).get_cluster() members = get_members(cluster, cluster_name, member_names, role, force, 'restart', False) if scheduled is None and not force: next_hour = (datetime.datetime.now() + datetime.timedelta(hours=1)).strftime('%Y-%m-%dT%H:%M') scheduled = click.prompt('When should the restart take place (e.g. ' + next_hour + ') ', type=str, default='now') scheduled_at = parse_scheduled(scheduled) confirm_members_action(members, force, 'restart', scheduled_at) if p_any: random.shuffle(members) members = members[:1] if version is None and not force: version = click.prompt('Restart if the PostgreSQL version is less than provided (e.g. 
9.5.2) ', type=str, default='') content = {} if pending: content['restart_pending'] = True if version: try: postgres_version_to_int(version) except PatroniException as e: raise PatroniCtlException(e.value) content['postgres_version'] = version if scheduled_at: if cluster.is_paused(): raise PatroniCtlException("Can't schedule restart in the paused state") content['schedule'] = scheduled_at.isoformat() if timeout is not None: content['timeout'] = timeout for member in members: if 'schedule' in content: if force and member.data.get('scheduled_restart'): r = request_patroni(member, 'delete', 'restart') check_response(r, member.name, 'flush scheduled restart', True) r = request_patroni(member, 'post', 'restart', content) if r.status == 200: click.echo('Success: restart on member {0}'.format(member.name)) elif r.status == 202: click.echo('Success: restart scheduled on member {0}'.format(member.name)) elif r.status == 409: click.echo('Failed: another restart is already scheduled on member {0}'.format(member.name)) else: click.echo('Failed: restart for member {0}, status code={1}, ({2})'.format( member.name, r.status, r.data.decode('utf-8')) ) @ctl.command('reinit', help='Reinitialize cluster member') @click.argument('cluster_name') @click.argument('member_names', nargs=-1) @option_force @click.option('--wait', help='Wait until reinitialization completes', is_flag=True) @click.pass_obj def reinit(obj, cluster_name, member_names, force, wait): cluster = get_dcs(obj, cluster_name).get_cluster() members = get_members(cluster, cluster_name, member_names, None, force, 'reinitialize') wait_on_members = [] for member in members: body = {'force': force} while True: r = request_patroni(member, 'post', 'reinitialize', body) started = check_response(r, member.name, 'reinitialize') if not started and r.data.endswith(b' already in progress') \ and not force and click.confirm('Do you want to cancel it and reinitialize anyway?'): body['force'] = True continue break if started and wait: wait_on_members.append(member) last_display = [] while wait_on_members: if wait_on_members != last_display: click.echo('Waiting for reinitialize to complete on: {0}'.format( ", ".join(member.name for member in wait_on_members)) ) last_display[:] = wait_on_members time.sleep(2) for member in wait_on_members: data = json.loads(request_patroni(member, 'get', 'patroni').data.decode('utf-8')) if data.get('state') != 'creating replica': click.echo('Reinitialize is completed on: {0}'.format(member.name)) wait_on_members.remove(member) def _do_failover_or_switchover(obj, action, cluster_name, master, candidate, force, scheduled=None): """ We want to trigger a failover or switchover for the specified cluster name. We verify that the cluster name, master name and candidate name are correct. If so, we trigger an action and keep the client up to date. 
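    Illustrative invocations from the command line (cluster and member names
    below are hypothetical examples, not values taken from this source):

        patronictl switchover my-cluster --master pg0 --candidate pg1 --scheduled 2030-01-01T12:00
        patronictl failover my-cluster --candidate pg1 --force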
""" dcs = get_dcs(obj, cluster_name) cluster = dcs.get_cluster() if action == 'switchover' and cluster.leader is None: raise PatroniCtlException('This cluster has no master') if master is None: if force or action == 'failover': master = cluster.leader and cluster.leader.name else: master = click.prompt('Master', type=str, default=cluster.leader.member.name) if master is not None and cluster.leader and cluster.leader.member.name != master: raise PatroniCtlException('Member {0} is not the leader of cluster {1}'.format(master, cluster_name)) # excluding members with nofailover tag candidate_names = [str(m.name) for m in cluster.members if m.name != master and not m.nofailover] # We sort the names for consistent output to the client candidate_names.sort() if not candidate_names: raise PatroniCtlException('No candidates found to {0} to'.format(action)) if candidate is None and not force: candidate = click.prompt('Candidate ' + str(candidate_names), type=str, default='') if action == 'failover' and not candidate: raise PatroniCtlException('Failover could be performed only to a specific candidate') if candidate == master: raise PatroniCtlException(action.title() + ' target and source are the same.') if candidate and candidate not in candidate_names: raise PatroniCtlException('Member {0} does not exist in cluster {1}'.format(candidate, cluster_name)) scheduled_at_str = None scheduled_at = None if action == 'switchover': if scheduled is None and not force: next_hour = (datetime.datetime.now() + datetime.timedelta(hours=1)).strftime('%Y-%m-%dT%H:%M') scheduled = click.prompt('When should the switchover take place (e.g. ' + next_hour + ' ) ', type=str, default='now') scheduled_at = parse_scheduled(scheduled) if scheduled_at: if cluster.is_paused(): raise PatroniCtlException("Can't schedule switchover in the paused state") scheduled_at_str = scheduled_at.isoformat() failover_value = {'leader': master, 'candidate': candidate, 'scheduled_at': scheduled_at_str} logging.debug(failover_value) # By now we have established that the leader exists and the candidate exists click.echo('Current cluster topology') output_members(dcs.get_cluster(), cluster_name) if not force: demote_msg = ', demoting current master ' + master if master else '' if scheduled_at_str: if not click.confirm('Are you sure you want to schedule {0} of cluster {1} at {2}{3}?' .format(action, cluster_name, scheduled_at_str, demote_msg)): raise PatroniCtlException('Aborting scheduled ' + action) else: if not click.confirm('Are you sure you want to {0} cluster {1}{2}?' 
.format(action, cluster_name, demote_msg)): raise PatroniCtlException('Aborting ' + action) r = None try: member = cluster.leader.member if cluster.leader else cluster.get_member(candidate, False) r = request_patroni(member, 'post', action, failover_value) # probably old patroni, which doesn't support switchover yet if r.status == 501 and action == 'switchover' and b'Server does not support this operation' in r.data: r = request_patroni(member, 'post', 'failover', failover_value) if r.status in (200, 202): logging.debug(r) cluster = dcs.get_cluster() logging.debug(cluster) click.echo('{0} {1}'.format(timestamp(), r.data.decode('utf-8'))) else: click.echo('{0} failed, details: {1}, {2}'.format(action.title(), r.status, r.data.decode('utf-8'))) return except Exception: logging.exception(r) logging.warning('Failing over to DCS') click.echo('{0} Could not {1} using Patroni api, falling back to DCS'.format(timestamp(), action)) dcs.manual_failover(master, candidate, scheduled_at=scheduled_at) output_members(cluster, cluster_name) @ctl.command('failover', help='Failover to a replica') @arg_cluster_name @click.option('--master', help='The name of the current master', default=None) @click.option('--candidate', help='The name of the candidate', default=None) @option_force @click.pass_obj def failover(obj, cluster_name, master, candidate, force): action = 'switchover' if master else 'failover' _do_failover_or_switchover(obj, action, cluster_name, master, candidate, force) @ctl.command('switchover', help='Switchover to a replica') @arg_cluster_name @click.option('--master', help='The name of the current master', default=None) @click.option('--candidate', help='The name of the candidate', default=None) @click.option('--scheduled', help='Timestamp of a scheduled switchover in unambiguous format (e.g. 
ISO 8601)', default=None) @option_force @click.pass_obj def switchover(obj, cluster_name, master, candidate, force, scheduled): _do_failover_or_switchover(obj, 'switchover', cluster_name, master, candidate, force, scheduled) def output_members(cluster, name, extended=False, fmt='pretty'): rows = [] logging.debug(cluster) cluster = cluster_as_json(cluster) columns = ['Cluster', 'Member', 'Host', 'Role', 'State', 'TL', 'Lag in MB'] for c in ('Pending restart', 'Scheduled restart'): if extended or any(m.get(c.lower().replace(' ', '_')) for m in cluster['members']): columns.append(c) # Show Host as 'host:port' if somebody is running on non-standard port or two nodes are running on the same host append_port = any(m['port'] != 5432 for m in cluster['members']) or\ len(set(m['host'] for m in cluster['members'])) < len(cluster['members']) for m in cluster['members']: logging.debug(m) lag = m.get('lag', '') m.update(cluster=name, member=m['name'], tl=m.get('timeline', ''), role='' if m['role'] == 'replica' else m['role'].replace('_', ' ').title(), lag_in_mb=round(lag/1024/1024) if isinstance(lag, six.integer_types) else lag, pending_restart='*' if m.get('pending_restart') else '') if append_port: m['host'] = ':'.join([m['host'], str(m['port'])]) if 'scheduled_restart' in m: value = m['scheduled_restart']['schedule'] if 'postgres_version' in m['scheduled_restart']: value += ' if version < {0}'.format(m['scheduled_restart']['postgres_version']) m['scheduled_restart'] = value rows.append([m.get(n.lower().replace(' ', '_'), '') for n in columns]) print_output(columns, rows, {'Lag in MB': 'r', 'TL': 'r'}, fmt) if fmt != 'pretty': # Omit service info when using machine-readable formats return service_info = [] if cluster.get('pause'): service_info.append('Maintenance mode: on') if 'scheduled_switchover' in cluster: info = 'Switchover scheduled at: ' + cluster['scheduled_switchover']['at'] for name in ('from', 'to'): if name in cluster['scheduled_switchover']: info += '\n{0:>24}: {1}'.format(name, cluster['scheduled_switchover'][name]) service_info.append(info) if service_info: click.echo(' ' + '\n '.join(service_info)) @ctl.command('list', help='List the Patroni members for a given Patroni') @click.argument('cluster_names', nargs=-1) @click.option('--extended', '-e', help='Show some extra information', is_flag=True) @click.option('--timestamp', '-t', 'ts', help='Print timestamp', is_flag=True) @option_format @option_watch @option_watchrefresh @click.pass_obj def members(obj, cluster_names, fmt, watch, w, extended, ts): if not cluster_names: if 'scope' in obj: cluster_names = [obj['scope']] if not cluster_names: return logging.warning('Listing members: No cluster names were provided') for cluster_name in cluster_names: dcs = get_dcs(obj, cluster_name) for _ in watching(w, watch): if ts: click.echo(timestamp(0)) cluster = dcs.get_cluster() output_members(cluster, cluster_name, extended, fmt) def timestamp(precision=6): return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:precision - 7] @ctl.command('configure', help='Create configuration file') @click.option('--config-file', '-c', help='Configuration file', prompt='Configuration file', default=CONFIG_FILE_PATH) @click.option('--dcs', '-d', help='The DCS connect url', prompt='DCS connect url', default='etcd://localhost:2379') @click.option('--namespace', '-n', help='The namespace', prompt='Namespace', default='/service/') def configure(config_file, dcs, namespace): store_config({'dcs_api': str(dcs), 'namespace': str(namespace)}, config_file) def 
touch_member(config, dcs): ''' Rip-off of the ha.touch_member without inter-class dependencies ''' p = Postgresql(config['postgresql']) p.set_state('running') p.set_role('master') def restapi_connection_string(config): protocol = 'https' if config.get('certfile') else 'http' connect_address = config.get('connect_address') listen = config['listen'] return '{0}://{1}/patroni'.format(protocol, connect_address or listen) data = { 'conn_url': p.connection_string, 'api_url': restapi_connection_string(config['restapi']), 'state': p.state, 'role': p.role } return dcs.touch_member(data, permanent=True) def set_defaults(config, cluster_name): """fill-in some basic configuration parameters if config file is not set """ config['postgresql'].setdefault('name', cluster_name) config['postgresql'].setdefault('scope', cluster_name) config['postgresql'].setdefault('listen', '127.0.0.1') config['postgresql']['authentication'] = {'replication': None} config['restapi']['listen'] = ':' in config['restapi']['listen'] and config['restapi']['listen'] or '127.0.0.1:8008' @ctl.command('scaffold', help='Create a structure for the cluster in DCS') @click.argument('cluster_name') @click.option('--sysid', '-s', help='System ID of the cluster to put into the initialize key', default="") @click.pass_obj def scaffold(obj, cluster_name, sysid): dcs = get_dcs(obj, cluster_name) cluster = dcs.get_cluster() if cluster and cluster.initialize is not None: raise PatroniCtlException("This cluster is already initialized") if not dcs.initialize(create_new=True, sysid=sysid): # initialize key already exists, don't touch this cluster raise PatroniCtlException("Initialize key for cluster {0} already exists".format(cluster_name)) set_defaults(obj, cluster_name) # make sure the leader keys will never expire if not (touch_member(obj, dcs) and dcs.attempt_to_acquire_leader(permanent=True)): # we did initialize this cluster, but failed to write the leader or member keys, wipe it down completely. 
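        # If touch_member() or attempt_to_acquire_leader() failed after initialize()
        # succeeded, a half-created cluster would otherwise remain in the DCS;
        # deleting it lets a later scaffold attempt start from a clean slate.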
dcs.delete_cluster() raise PatroniCtlException("Unable to install permanent leader for cluster {0}".format(cluster_name)) click.echo("Cluster {0} has been created successfully".format(cluster_name)) @ctl.command('flush', help='Flush scheduled events') @click.argument('cluster_name') @click.argument('member_names', nargs=-1) @click.argument('target', type=click.Choice(['restart'])) @click.option('--role', '-r', help='Flush only members with this role', default='any', type=click.Choice(['master', 'replica', 'any'])) @option_force @click.pass_obj def flush(obj, cluster_name, member_names, force, role, target): cluster = get_dcs(obj, cluster_name).get_cluster() members = get_members(cluster, cluster_name, member_names, role, force, 'flush') for member in members: if target == 'restart': if member.data.get('scheduled_restart'): r = request_patroni(member, 'delete', 'restart') check_response(r, member.name, 'flush scheduled restart') else: click.echo('No scheduled restart for member {0}'.format(member.name)) def wait_until_pause_is_applied(dcs, paused, old_cluster): click.echo("'{0}' request sent, waiting until it is recognized by all nodes".format(paused and 'pause' or 'resume')) old = {m.name: m.index for m in old_cluster.members if m.api_url} loop_wait = old_cluster.config.data.get('loop_wait', dcs.loop_wait) for _ in polling_loop(loop_wait + 1): cluster = dcs.get_cluster() if all(m.data.get('pause', False) == paused for m in cluster.members if m.name in old): break else: remaining = [m.name for m in cluster.members if m.data.get('pause', False) != paused and m.name in old and old[m.name] != m.index] if remaining: return click.echo("{0} members didn't recognize pause state after {1} seconds" .format(', '.join(remaining), loop_wait)) return click.echo('Success: cluster management is {0}'.format(paused and 'paused' or 'resumed')) def toggle_pause(config, cluster_name, paused, wait): dcs = get_dcs(config, cluster_name) cluster = dcs.get_cluster() if cluster.is_paused() == paused: raise PatroniCtlException('Cluster is {0} paused'.format(paused and 'already' or 'not')) members = [] if cluster.leader: members.append(cluster.leader.member) members.extend([m for m in cluster.members if m.api_url and (not members or members[0].name != m.name)]) for member in members: try: r = request_patroni(member, 'patch', 'config', {'pause': paused or None}) except Exception as err: logging.warning(str(err)) logging.warning('Member %s is not accessible', member.name) continue if r.status == 200: if wait: wait_until_pause_is_applied(dcs, paused, cluster) else: click.echo('Success: cluster management is {0}'.format(paused and 'paused' or 'resumed')) else: click.echo('Failed: {0} cluster management status code={1}, ({2})'.format( paused and 'pause' or 'resume', r.status, r.data.decode('utf-8'))) break else: raise PatroniCtlException('Can not find accessible cluster member') @ctl.command('pause', help='Disable auto failover') @arg_cluster_name @click.pass_obj @click.option('--wait', help='Wait until pause is applied on all nodes', is_flag=True) def pause(obj, cluster_name, wait): return toggle_pause(obj, cluster_name, True, wait) @ctl.command('resume', help='Resume auto failover') @arg_cluster_name @click.option('--wait', help='Wait until pause is cleared on all nodes', is_flag=True) @click.pass_obj def resume(obj, cluster_name, wait): return toggle_pause(obj, cluster_name, False, wait) @contextmanager def temporary_file(contents, suffix='', prefix='tmp'): """Creates a temporary file with specified contents that 
persists for the context. :param contents: binary string that will be written to the file. :param prefix: will be prefixed to the filename. :param suffix: will be appended to the filename. :returns path of the created file. """ tmp = tempfile.NamedTemporaryFile(suffix=suffix, prefix=prefix, delete=False) with tmp: tmp.write(contents) try: yield tmp.name finally: os.unlink(tmp.name) def show_diff(before_editing, after_editing): """Shows a diff between two strings. If the output is to a tty the diff will be colored. Inputs are expected to be unicode strings. """ def listify(string): return [l+'\n' for l in string.rstrip('\n').split('\n')] unified_diff = difflib.unified_diff(listify(before_editing), listify(after_editing)) if sys.stdout.isatty(): buf = io.StringIO() for line in unified_diff: # Force cast to unicode as difflib on Python 2.7 returns a mix of unicode and str. buf.write(six.text_type(line)) buf.seek(0) class opts: side_by_side = False width = 80 tab_width = 8 cdiff.markup_to_pager(cdiff.PatchStream(buf), opts) else: for line in unified_diff: click.echo(line.rstrip('\n')) def format_config_for_editing(data): """Formats configuration as YAML for human consumption. :param data: configuration as nested dictionaries :returns unicode YAML of the configuration""" return yaml.safe_dump(data, default_flow_style=False, encoding=None, allow_unicode=True) def apply_config_changes(before_editing, data, kvpairs): """Applies config changes specified as a list of key-value pairs. Keys are interpreted as dotted paths into the configuration data structure. Except for paths beginning with `postgresql.parameters` where rest of the path is used directly to allow for PostgreSQL GUCs containing dots. Values are interpreted as YAML values. :param before_editing: human representation before editing :param data: configuration datastructure :param kvpairs: list of strings containing key value pairs separated by = :returns tuple of human readable and parsed datastructure after changes """ changed_data = copy.deepcopy(data) def set_path_value(config, path, value, prefix=()): # Postgresql GUCs can't be nested, but can contain dots so we re-flatten the structure for this case if prefix == ('postgresql', 'parameters'): path = ['.'.join(path)] key = path[0] if len(path) == 1: if value is None: config.pop(key, None) else: config[key] = value else: if not isinstance(config.get(key), dict): config[key] = {} set_path_value(config[key], path[1:], value, prefix + (key,)) if config[key] == {}: del config[key] for pair in kvpairs: if not pair or "=" not in pair: raise PatroniCtlException("Invalid parameter setting {0}".format(pair)) key_path, value = pair.split("=", 1) set_path_value(changed_data, key_path.strip().split("."), yaml.safe_load(value)) return format_config_for_editing(changed_data), changed_data def apply_yaml_file(data, filename): """Applies changes from a YAML file to configuration :param data: configuration datastructure :param filename: name of the YAML file, - is taken to mean standard input :returns tuple of human readable and parsed datastructure after changes """ changed_data = copy.deepcopy(data) if filename == '-': new_options = yaml.safe_load(sys.stdin) else: with open(filename) as fd: new_options = yaml.safe_load(fd) patch_config(changed_data, new_options) return format_config_for_editing(changed_data), changed_data def find_executable(executable, path=None): _, ext = os.path.splitext(executable) if (sys.platform == 'win32') and (ext != '.exe'): executable = executable + '.exe' if 
os.path.isfile(executable): return executable if path is None: path = os.environ.get('PATH', os.defpath) for p in path.split(os.pathsep): f = os.path.join(p, executable) if os.path.isfile(f): return f def invoke_editor(before_editing, cluster_name): """Starts editor command to edit configuration in human readable format :param before_editing: human representation before editing :returns tuple of human readable and parsed datastructure after changes """ editor_cmd = os.environ.get('EDITOR') if not editor_cmd: for editor in ('editor', 'vi'): editor_cmd = find_executable(editor) if editor_cmd: logging.debug('Setting fallback editor_cmd=%s', editor) break if not editor_cmd: raise PatroniCtlException('EDITOR environment variable is not set. editor or vi are not available') with temporary_file(contents=before_editing.encode('utf-8'), suffix='.yaml', prefix='{0}-config-'.format(cluster_name)) as tmpfile: ret = subprocess.call([editor_cmd, tmpfile]) if ret: raise PatroniCtlException("Editor exited with return code {0}".format(ret)) with codecs.open(tmpfile, encoding='utf-8') as fd: after_editing = fd.read() return after_editing, yaml.safe_load(after_editing) @ctl.command('edit-config', help="Edit cluster configuration") @arg_cluster_name @click.option('--quiet', '-q', is_flag=True, help='Do not show changes') @click.option('--set', '-s', 'kvpairs', multiple=True, help='Set specific configuration value. Can be specified multiple times') @click.option('--pg', '-p', 'pgkvpairs', multiple=True, help='Set specific PostgreSQL parameter value. Shorthand for -s postgresql.parameters. ' 'Can be specified multiple times') @click.option('--apply', 'apply_filename', help='Apply configuration from file. Use - for stdin.') @click.option('--replace', 'replace_filename', help='Apply configuration from file, replacing existing configuration.' 
' Use - for stdin.') @option_force @click.pass_obj def edit_config(obj, cluster_name, force, quiet, kvpairs, pgkvpairs, apply_filename, replace_filename): dcs = get_dcs(obj, cluster_name) cluster = dcs.get_cluster() before_editing = format_config_for_editing(cluster.config.data) after_editing = None # Serves as a flag if any changes were requested changed_data = cluster.config.data if replace_filename: after_editing, changed_data = apply_yaml_file({}, replace_filename) if apply_filename: after_editing, changed_data = apply_yaml_file(changed_data, apply_filename) if kvpairs or pgkvpairs: all_pairs = list(kvpairs) + ['postgresql.parameters.'+v.lstrip() for v in pgkvpairs] after_editing, changed_data = apply_config_changes(before_editing, changed_data, all_pairs) # If no changes were specified on the command line invoke editor if after_editing is None: after_editing, changed_data = invoke_editor(before_editing, cluster_name) if cluster.config.data == changed_data: if not quiet: click.echo("Not changed") return if not quiet: show_diff(before_editing, after_editing) if (apply_filename == '-' or replace_filename == '-') and not force: click.echo("Use --force option to apply changes") return if force or click.confirm('Apply these changes?'): if not dcs.set_config_value(json.dumps(changed_data), cluster.config.index): raise PatroniCtlException("Config modification aborted due to concurrent changes") click.echo("Configuration changed") @ctl.command('show-config', help="Show cluster configuration") @arg_cluster_name @click.pass_obj def show_config(obj, cluster_name): cluster = get_dcs(obj, cluster_name).get_cluster() click.echo(format_config_for_editing(cluster.config.data)) @ctl.command('version', help='Output version of patronictl command or a running Patroni instance') @click.argument('cluster_name', required=False) @click.argument('member_names', nargs=-1) @click.pass_obj def version(obj, cluster_name, member_names): click.echo("patronictl version {0}".format(__version__)) if not cluster_name: return click.echo("") cluster = get_dcs(obj, cluster_name).get_cluster() for m in cluster.members: if m.api_url: if not member_names or m.name in member_names: try: response = request_patroni(m) data = json.loads(response.data.decode('utf-8')) version = data.get('patroni', {}).get('version') pg_version = data.get('server_version') pg_version_str = " PostgreSQL {0}".format(format_pg_version(pg_version)) if pg_version else "" click.echo("{0}: Patroni {1}{2}".format(m.name, version, pg_version_str)) except Exception as e: click.echo("{0}: failed to get version: {1}".format(m.name, e)) @ctl.command('history', help="Show the history of failovers/switchovers") @arg_cluster_name @option_format @click.pass_obj def history(obj, cluster_name, fmt): cluster = get_dcs(obj, cluster_name).get_cluster() history = cluster.history and cluster.history.lines or [] for line in history: if len(line) < 4: line.append('') print_output(['TL', 'LSN', 'Reason', 'Timestamp'], history, {'TL': 'r', 'LSN': 'r'}, fmt) def format_pg_version(version): if version < 100000: return "{0}.{1}.{2}".format(version // 10000, version // 100 % 100, version % 100) else: return "{0}.{1}".format(version // 10000, version % 100)
patroni-1.6.4/patroni/dcs/
patroni-1.6.4/patroni/dcs/__init__.py
import abc import dateutil import importlib import inspect import json import logging import os import 
pkgutil import re import six import sys import time from collections import defaultdict, namedtuple from copy import deepcopy from patroni.exceptions import PatroniException from patroni.utils import parse_bool, uri from random import randint from six.moves.urllib_parse import urlparse, urlunparse, parse_qsl from threading import Event, Lock slot_name_re = re.compile('^[a-z0-9_]{1,63}$') logger = logging.getLogger(__name__) def slot_name_from_member_name(member_name): """Translate member name to valid PostgreSQL slot name. PostgreSQL replication slot names must be valid PostgreSQL names. This function maps the wider space of member names to valid PostgreSQL names. Names are lowercased, dashes and periods common in hostnames are replaced with underscores, other characters are encoded as their unicode codepoint. The name is truncated to 63 characters. Multiple different member names may map to a single slot name.""" def replace_char(match): c = match.group(0) return '_' if c in '-.' else "u{:04d}".format(ord(c)) slot_name = re.sub('[^a-z0-9_]', replace_char, member_name.lower()) return slot_name[0:63] def parse_connection_string(value): """Original Governor stores connection strings for each cluster member in the following format: postgres://{username}:{password}@{connect_address}/postgres Since each of our patroni instances provides its own REST API endpoint it's good to store this information in DCS along with the postgresql connection string. In order to not introduce new keys and be compatible with original Governor we decided to extend the original connection string in the following way: postgres://{username}:{password}@{connect_address}/postgres?application_name={api_url} This way original Governor could use such a connection string as it is, because of a feature of the `libpq` library. This method is able to split connection string stored in DCS into two parts, `conn_url` and `api_url`""" scheme, netloc, path, params, query, fragment = urlparse(value) conn_url = urlunparse((scheme, netloc, path, params, '', fragment)) api_url = ([v for n, v in parse_qsl(query) if n == 'application_name'] or [None])[0] return conn_url, api_url def dcs_modules(): """Get names of DCS modules, depending on execution environment. If being packaged with PyInstaller, modules aren't discoverable dynamically by scanning the source directory because `FrozenImporter` doesn't implement the `iter_modules` method. But it is still possible to find all potential DCS modules by iterating through `toc`, which contains the list of all "frozen" resources.""" dcs_dirname = os.path.dirname(__file__) module_prefix = __package__ + '.' 
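    # __package__ is 'patroni.dcs', so module_prefix is 'patroni.dcs.' and the
    # discovered names look like 'patroni.dcs.etcd' or 'patroni.dcs.consul'.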
if getattr(sys, 'frozen', False): importer = pkgutil.get_importer(dcs_dirname) return [module for module in list(importer.toc) if module.startswith(module_prefix) and module.count('.') == 2] else: return [module_prefix + name for _, name, is_pkg in pkgutil.iter_modules([dcs_dirname]) if not is_pkg] def get_dcs(config): modules = dcs_modules() for module_name in modules: name = module_name.split('.')[-1] if name in config: # we will try to import only modules which have configuration section in the config file try: module = importlib.import_module(module_name) for key, item in module.__dict__.items(): # iterate through the module content # try to find implementation of AbstractDCS interface, class name must match with module_name if key.lower() == name and inspect.isclass(item) and issubclass(item, AbstractDCS): # propagate some parameters config[name].update({p: config[p] for p in ('namespace', 'name', 'scope', 'loop_wait', 'patronictl', 'ttl', 'retry_timeout') if p in config}) return item(config[name]) except ImportError: logger.debug('Failed to import %s', module_name) available_implementations = [] for module_name in modules: name = module_name.split('.')[-1] try: module = importlib.import_module(module_name) available_implementations.extend(name for key, item in module.__dict__.items() if key.lower() == name and inspect.isclass(item) and issubclass(item, AbstractDCS)) except ImportError: logger.info('Failed to import %s', module_name) raise PatroniException("""Can not find suitable configuration of distributed configuration store Available implementations: """ + ', '.join(sorted(set(available_implementations)))) class Member(namedtuple('Member', 'index,name,session,data')): """Immutable object (namedtuple) which represents single member of PostgreSQL cluster. Consists of the following fields: :param index: modification index of a given member key in a Configuration Store :param name: name of PostgreSQL cluster member :param session: either session id or just ttl in seconds :param data: arbitrary data i.e. conn_url, api_url, xlog location, state, role, tags, etc... There are two mandatory keys in a data: conn_url: connection string containing host, user and password which could be used to access this member. 
api_url: REST API url of patroni instance""" @staticmethod def from_node(index, name, session, data): """ >>> Member.from_node(-1, '', '', '{"conn_url": "postgres://foo@bar/postgres"}') is not None True >>> Member.from_node(-1, '', '', '{') Member(index=-1, name='', session='', data={}) """ if data.startswith('postgres'): conn_url, api_url = parse_connection_string(data) data = {'conn_url': conn_url, 'api_url': api_url} else: try: data = json.loads(data) except (TypeError, ValueError): data = {} return Member(index, name, session, data) @property def conn_url(self): conn_url = self.data.get('conn_url') conn_kwargs = self.data.get('conn_kwargs') if conn_url: return conn_url if conn_kwargs: conn_url = uri('postgresql', (conn_kwargs.get('host'), conn_kwargs.get('port', 5432))) self.data['conn_url'] = conn_url return conn_url def conn_kwargs(self, auth=None): defaults = { "host": "", "port": "", "database": "" } ret = self.data.get('conn_kwargs') if ret: defaults.update(ret) ret = defaults else: r = urlparse(self.conn_url) ret = { 'host': r.hostname, 'port': r.port or 5432, 'database': r.path[1:] } self.data['conn_kwargs'] = ret.copy() # apply any remaining authentication parameters if auth and isinstance(auth, dict): ret.update({k: v for k, v in auth.items() if v is not None}) if 'username' in auth: ret['user'] = ret.pop('username') return ret @property def api_url(self): return self.data.get('api_url') @property def tags(self): return self.data.get('tags', {}) @property def nofailover(self): return self.tags.get('nofailover', False) @property def replicatefrom(self): return self.tags.get('replicatefrom') @property def clonefrom(self): return self.tags.get('clonefrom', False) and bool(self.conn_url) @property def state(self): return self.data.get('state', 'unknown') @property def is_running(self): return self.state == 'running' class RemoteMember(Member): """ Represents a remote master for a standby cluster """ def __new__(cls, name, data): return super(RemoteMember, cls).__new__(cls, None, name, None, data) @staticmethod def allowed_keys(): return ('primary_slot_name', 'create_replica_methods', 'restore_command', 'archive_cleanup_command', 'recovery_min_apply_delay', 'no_replication_slot') def __getattr__(self, name): if name in RemoteMember.allowed_keys(): return self.data.get(name) class Leader(namedtuple('Leader', 'index,session,member')): """Immutable object (namedtuple) which represents leader key. 
Consists of the following fields: :param index: modification index of a leader key in a Configuration Store :param session: either session id or just ttl in seconds :param member: reference to a `Member` object which represents current leader (see `Cluster.members`)""" @property def name(self): return self.member.name def conn_kwargs(self, auth=None): return self.member.conn_kwargs(auth) @property def conn_url(self): return self.member.conn_url @property def data(self): return self.member.data @property def timeline(self): return self.data.get('timeline') @property def checkpoint_after_promote(self): """ >>> Leader(1, '', Member.from_node(1, '', '', '{"version":"z"}')).checkpoint_after_promote """ version = self.data.get('version') if version: try: # 1.5.6 is the last version which doesn't expose checkpoint_after_promote: false if tuple(map(int, version.split('.'))) > (1, 5, 6): return self.data['role'] == 'master' and 'checkpoint_after_promote' not in self.data except Exception: logger.debug('Failed to parse Patroni version %s', version) class Failover(namedtuple('Failover', 'index,leader,candidate,scheduled_at')): """ >>> 'Failover' in str(Failover.from_node(1, '{"leader": "cluster_leader"}')) True >>> 'Failover' in str(Failover.from_node(1, {"leader": "cluster_leader"})) True >>> 'Failover' in str(Failover.from_node(1, '{"leader": "cluster_leader", "member": "cluster_candidate"}')) True >>> Failover.from_node(1, 'null') is None False >>> n = '{"leader": "cluster_leader", "member": "cluster_candidate", "scheduled_at": "2016-01-14T10:09:57.1394Z"}' >>> 'tzinfo=' in str(Failover.from_node(1, n)) True >>> Failover.from_node(1, None) is None False >>> Failover.from_node(1, '{}') is None False >>> 'abc' in Failover.from_node(1, 'abc:def') True """ @staticmethod def from_node(index, value): if isinstance(value, dict): data = value elif value: try: data = json.loads(value) if not isinstance(data, dict): data = {} except ValueError: t = [a.strip() for a in value.split(':')] leader = t[0] candidate = t[1] if len(t) > 1 else None return Failover(index, leader, candidate, None) if leader or candidate else None else: data = {} if data.get('scheduled_at'): data['scheduled_at'] = dateutil.parser.parse(data['scheduled_at']) return Failover(index, data.get('leader'), data.get('member'), data.get('scheduled_at')) def __len__(self): return int(bool(self.leader)) + int(bool(self.candidate)) class ClusterConfig(namedtuple('ClusterConfig', 'index,data,modify_index')): @staticmethod def from_node(index, data, modify_index=None): """ >>> ClusterConfig.from_node(1, '{') is None False """ try: data = json.loads(data) except (TypeError, ValueError): data = None modify_index = 0 if not isinstance(data, dict): data = {} return ClusterConfig(index, data, index if modify_index is None else modify_index) @property def permanent_slots(self): return isinstance(self.data, dict) and ( self.data.get('permanent_replication_slots') or self.data.get('permanent_slots') or self.data.get('slots') ) or {} class SyncState(namedtuple('SyncState', 'index,leader,sync_standby')): """Immutable object (namedtuple) which represents last observed synchronous replication state :param index: modification index of a synchronization key in a Configuration Store :param leader: reference to member that was leader :param sync_standby: standby that was last synchronized to leader """ @staticmethod def from_node(index, value): """ >>> SyncState.from_node(1, None).leader is None True >>> SyncState.from_node(1, '{}').leader is None True >>> 
SyncState.from_node(1, '{').leader is None True >>> SyncState.from_node(1, '[]').leader is None True >>> SyncState.from_node(1, '{"leader": "leader"}').leader == "leader" True >>> SyncState.from_node(1, {"leader": "leader"}).leader == "leader" True """ if isinstance(value, dict): data = value elif value: try: data = json.loads(value) if not isinstance(data, dict): data = {} except (TypeError, ValueError): data = {} else: data = {} return SyncState(index, data.get('leader'), data.get('sync_standby')) def matches(self, name): """ Returns if a node name matches one of the nodes in the sync state >>> s = SyncState(1, 'foo', 'bar') >>> s.matches('foo') True >>> s.matches('bar') True >>> s.matches('baz') False >>> s.matches(None) False >>> SyncState(1, None, None).matches('foo') False """ return name is not None and name in (self.leader, self.sync_standby) class TimelineHistory(namedtuple('TimelineHistory', 'index,value,lines')): """Object representing timeline history file""" @staticmethod def from_node(index, value): """ >>> h = TimelineHistory.from_node(1, 2) >>> h.lines [] """ try: lines = json.loads(value) except (TypeError, ValueError): lines = None if not isinstance(lines, list): lines = [] return TimelineHistory(index, value, lines) class Cluster(namedtuple('Cluster', 'initialize,config,leader,last_leader_operation,members,failover,sync,history')): """Immutable object (namedtuple) which represents PostgreSQL cluster. Consists of the following fields: :param initialize: shows whether this cluster has initialization key stored in DC or not. :param config: global dynamic configuration, reference to `ClusterConfig` object :param leader: `Leader` object which represents current leader of the cluster :param last_leader_operation: int or long object containing position of last known leader operation. This value is stored in `/optime/leader` key :param members: list of Member object, all PostgreSQL cluster members including leader :param failover: reference to `Failover` object :param sync: reference to `SyncState` object, last observed synchronous replication state. :param history: reference to `TimelineHistory` object """ def is_unlocked(self): return not (self.leader and self.leader.name) def has_member(self, member_name): return any(m for m in self.members if m.name == member_name) def get_member(self, member_name, fallback_to_leader=True): return ([m for m in self.members if m.name == member_name] or [self.leader if fallback_to_leader else None])[0] def get_clone_member(self, exclude): exclude = [exclude] + [self.leader.name] if self.leader else [] candidates = [m for m in self.members if m.clonefrom and m.is_running and m.name not in exclude] return candidates[randint(0, len(candidates) - 1)] if candidates else self.leader def check_mode(self, mode): return bool(self.config and parse_bool(self.config.data.get(mode))) def is_paused(self): return self.check_mode('pause') def is_synchronous_mode(self): return self.check_mode('synchronous_mode') def get_replication_slots(self, name, role): # if the replicatefrom tag is set on the member - we should not create the replication slot for it on # the current master, because that member would replicate from elsewhere. 
We still create the slot if # the replicatefrom destination member is currently not a member of the cluster (fallback to the # master), or if replicatefrom destination member happens to be the current master use_slots = self.config and self.config.data.get('postgresql', {}).get('use_slots', True) if role in ('master', 'standby_leader'): slot_members = [m.name for m in self.members if use_slots and m.name != name and (m.replicatefrom is None or m.replicatefrom == name or not self.has_member(m.replicatefrom))] permanent_slots = (self.config and self.config.permanent_slots or {}).copy() else: # only manage slots for replicas that replicate from this one, except for the leader among them slot_members = [m.name for m in self.members if use_slots and m.replicatefrom == name and m.name != self.leader.name] permanent_slots = {} slots = {slot_name_from_member_name(name): {'type': 'physical'} for name in slot_members} if len(slots) < len(slot_members): # Find which names are conflicting for a nicer error message slot_conflicts = defaultdict(list) for name in slot_members: slot_conflicts[slot_name_from_member_name(name)].append(name) logger.error("Following cluster members share a replication slot name: %s", "; ".join("{} map to {}".format(", ".join(v), k) for k, v in slot_conflicts.items() if len(v) > 1)) # "merge" replication slots for members with permanent_replication_slots for name, value in permanent_slots.items(): if not slot_name_re.match(name): logger.error("Invalid permanent replication slot name '%s'", name) logger.error("Slot name may only contain lower case letters, numbers, and the underscore chars") continue if name in slots: logger.error("Permanent replication slot {'%s': %s} is conflicting with" + " physical replication slot for cluster member", name, value) continue value = deepcopy(value) if not value: value = {'type': 'physical'} if isinstance(value, dict): if 'type' not in value: value['type'] = 'logical' if value.get('database') and value.get('plugin') else 'physical' if value['type'] == 'physical' or value['type'] == 'logical' \ and value.get('database') and value.get('plugin'): slots[name] = value continue logger.error("Bad value for slot '%s' in permanent_slots: %s", name, permanent_slots[name]) return slots def has_permanent_logical_slots(self, name): slots = self.get_replication_slots(name, 'master').values() return any(v for v in slots if v.get("type") == "logical") @property def timeline(self): """ >>> Cluster(0, 0, 0, 0, 0, 0, 0, 0).timeline 0 >>> Cluster(0, 0, 0, 0, 0, 0, 0, TimelineHistory.from_node(1, '[]')).timeline 1 >>> Cluster(0, 0, 0, 0, 0, 0, 0, TimelineHistory.from_node(1, '[["a"]]')).timeline 0 """ if self.history: if self.history.lines: try: return int(self.history.lines[-1][0]) + 1 except Exception: logger.error('Failed to parse cluster history from DCS: %s', self.history.lines) elif self.history.value == '[]': return 1 return 0 @six.add_metaclass(abc.ABCMeta) class AbstractDCS(object): _INITIALIZE = 'initialize' _CONFIG = 'config' _LEADER = 'leader' _FAILOVER = 'failover' _HISTORY = 'history' _MEMBERS = 'members/' _OPTIME = 'optime' _LEADER_OPTIME = _OPTIME + '/' + _LEADER _SYNC = 'sync' def __init__(self, config): """ :param config: dict, reference to config section of selected DCS. i.e.: `zookeeper` for zookeeper, `etcd` for etcd, etc... 
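        For illustration only, an `etcd` section as assembled by `get_dcs()` might
        look like the following (all values are hypothetical examples):

            {'host': 'localhost:2379', 'scope': 'my-cluster', 'name': 'node1',
             'namespace': '/service/', 'ttl': 30, 'retry_timeout': 10}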
""" self._name = config['name'] self._base_path = re.sub('/+', '/', '/'.join(['', config.get('namespace', 'service'), config['scope']])) self._set_loop_wait(config.get('loop_wait', 10)) self._ctl = bool(config.get('patronictl', False)) self._cluster = None self._cluster_valid_till = 0 self._cluster_thread_lock = Lock() self._last_leader_operation = '' self.event = Event() def client_path(self, path): return '/'.join([self._base_path, path.lstrip('/')]) @property def initialize_path(self): return self.client_path(self._INITIALIZE) @property def config_path(self): return self.client_path(self._CONFIG) @property def members_path(self): return self.client_path(self._MEMBERS) @property def member_path(self): return self.client_path(self._MEMBERS + self._name) @property def leader_path(self): return self.client_path(self._LEADER) @property def failover_path(self): return self.client_path(self._FAILOVER) @property def history_path(self): return self.client_path(self._HISTORY) @property def leader_optime_path(self): return self.client_path(self._LEADER_OPTIME) @property def sync_path(self): return self.client_path(self._SYNC) @abc.abstractmethod def set_ttl(self, ttl): """Set the new ttl value for leader key""" @abc.abstractmethod def ttl(self): """Get new ttl value""" @abc.abstractmethod def set_retry_timeout(self, retry_timeout): """Set the new value for retry_timeout""" def _set_loop_wait(self, loop_wait): self._loop_wait = loop_wait def reload_config(self, config): self._set_loop_wait(config['loop_wait']) self.set_ttl(config['ttl']) self.set_retry_timeout(config['retry_timeout']) @property def loop_wait(self): return self._loop_wait @abc.abstractmethod def _load_cluster(self): """Internally this method should build `Cluster` object which represents current state and topology of the cluster in DCS. this method supposed to be called only by `get_cluster` method. raise `~DCSError` in case of communication or other problems with DCS. If the current node was running as a master and exception raised, instance would be demoted.""" def get_cluster(self): try: cluster = self._load_cluster() except Exception: self.reset_cluster() raise with self._cluster_thread_lock: self._cluster = cluster self._cluster_valid_till = time.time() + self.ttl return cluster @property def cluster(self): with self._cluster_thread_lock: return self._cluster if self._cluster_valid_till > time.time() else None def reset_cluster(self): with self._cluster_thread_lock: self._cluster = None self._cluster_valid_till = 0 @abc.abstractmethod def _write_leader_optime(self, last_operation): """write current xlog location into `/optime/leader` key in DCS :param last_operation: absolute xlog location in bytes :returns: `!True` on success.""" def write_leader_optime(self, last_operation): if self._last_leader_operation != last_operation and self._write_leader_optime(last_operation): self._last_leader_operation = last_operation @abc.abstractmethod def _update_leader(self): """Update leader key (or session) ttl :returns: `!True` if leader key (or session) has been updated successfully. If not, `!False` must be returned and current instance would be demoted. 
You have to use CAS (Compare And Swap) operation in order to update leader key, for example for etcd `prevValue` parameter must be used.""" def update_leader(self, last_operation, access_is_restricted=False): """Update leader key (or session) ttl and optime/leader :param last_operation: absolute xlog location in bytes :returns: `!True` if leader key (or session) has been updated successfully. If not, `!False` must be returned and current instance would be demoted.""" ret = self._update_leader() if ret and last_operation: self.write_leader_optime(last_operation) return ret @abc.abstractmethod def attempt_to_acquire_leader(self, permanent=False): """Attempt to acquire leader lock This method should create `/leader` key with value=`~self._name` :param permanent: if set to `!True`, the leader key will never expire. Used in patronictl for the external master :returns: `!True` if key has been created successfully. Key must be created atomically. In case if key already exists it should not be overwritten and `!False` must be returned""" @abc.abstractmethod def set_failover_value(self, value, index=None): """Create or update `/failover` key""" def manual_failover(self, leader, candidate, scheduled_at=None, index=None): failover_value = {} if leader: failover_value['leader'] = leader if candidate: failover_value['member'] = candidate if scheduled_at: failover_value['scheduled_at'] = scheduled_at.isoformat() return self.set_failover_value(json.dumps(failover_value, separators=(',', ':')), index) @abc.abstractmethod def set_config_value(self, value, index=None): """Create or update `/config` key""" @abc.abstractmethod def touch_member(self, data, permanent=False): """Update member key in DCS. This method should create or update key with the name = '/members/' + `~self._name` and value = data in a given DCS. :param data: information about instance (including connection strings) :param ttl: ttl for member key, optional parameter. If it is None `~self.member_ttl will be used` :param permanent: if set to `!True`, the member key will never expire. Used in patronictl for the external master. :returns: `!True` on success otherwise `!False` """ @abc.abstractmethod def take_leader(self): """This method should create leader key with value = `~self._name` and ttl=`~self.ttl` Since it could be called only on initial cluster bootstrap it could create this key regardless, overwriting the key if necessary.""" @abc.abstractmethod def initialize(self, create_new=True, sysid=""): """Race for cluster initialization. :param create_new: False if the key should already exist (in the case we are setting the system_id) :param sysid: PostgreSQL cluster system identifier, if specified, is written to the key :returns: `!True` if key has been created successfully. 
this method should create atomically initialize key and return `!True` otherwise it should return `!False`""" @abc.abstractmethod def delete_leader(self): """Voluntarily remove leader key from DCS This method should remove leader key if current instance is the leader""" @abc.abstractmethod def cancel_initialization(self): """ Removes the initialize key for a cluster """ @abc.abstractmethod def delete_cluster(self): """Delete cluster from DCS""" @staticmethod def sync_state(leader, sync_standby): """Build sync_state dict""" return {'leader': leader, 'sync_standby': sync_standby} def write_sync_state(self, leader, sync_standby, index=None): sync_value = self.sync_state(leader, sync_standby) return self.set_sync_state_value(json.dumps(sync_value, separators=(',', ':')), index) @abc.abstractmethod def set_history_value(self, value): """""" @abc.abstractmethod def set_sync_state_value(self, value, index=None): """""" @abc.abstractmethod def delete_sync_state(self, index=None): """""" def watch(self, leader_index, timeout): """If the current node is a master it should just sleep. Any other node should watch for changes of leader key with a given timeout :param leader_index: index of a leader key :param timeout: timeout in seconds :returns: `!True` if you would like to reschedule the next run of ha cycle""" self.event.wait(timeout) return self.event.isSet() patroni-1.6.4/patroni/dcs/consul.py000066400000000000000000000510311361356115100173020ustar00rootroot00000000000000from __future__ import absolute_import import json import logging import os import re import socket import ssl import time import urllib3 from consul import ConsulException, NotFound, base from urllib3.exceptions import HTTPError from six.moves.urllib.parse import urlencode, urlparse, quote from six.moves.http_client import HTTPException from . 
import AbstractDCS, Cluster, ClusterConfig, Failover, Leader, Member, SyncState, TimelineHistory
from ..exceptions import DCSError
from ..utils import deep_compare, parse_bool, Retry, RetryFailedError, split_host_port, uri, USER_AGENT

logger = logging.getLogger(__name__)


class ConsulError(DCSError):
    pass


class ConsulInternalError(ConsulException):
    """An internal Consul server error occurred"""


class InvalidSessionTTL(ConsulException):
    """Session TTL is too small or too big"""


class InvalidSession(ConsulException):
    """invalid session"""


class HTTPClient(object):

    def __init__(self, host='127.0.0.1', port=8500, token=None, scheme='http', verify=True, cert=None, ca_cert=None):
        self.token = token
        self._read_timeout = 10
        self.base_uri = uri(scheme, (host, port))
        kwargs = {}
        if cert:
            if isinstance(cert, tuple):
                # Key and cert are separate
                kwargs['cert_file'] = cert[0]
                kwargs['key_file'] = cert[1]
            else:
                # combined certificate
                kwargs['cert_file'] = cert
        if ca_cert:
            kwargs['ca_certs'] = ca_cert
        if verify or ca_cert:
            kwargs['cert_reqs'] = ssl.CERT_REQUIRED
        self.http = urllib3.PoolManager(num_pools=10, **kwargs)
        self._ttl = None

    def set_read_timeout(self, timeout):
        self._read_timeout = timeout/3.0

    @property
    def ttl(self):
        return self._ttl

    def set_ttl(self, ttl):
        ret = self._ttl != ttl
        self._ttl = ttl
        return ret

    @staticmethod
    def response(response):
        data = response.data.decode('utf-8')
        if response.status == 500:
            msg = '{0} {1}'.format(response.status, data)
            if data.startswith('Invalid Session TTL'):
                raise InvalidSessionTTL(msg)
            elif data.startswith('invalid session'):
                raise InvalidSession(msg)
            else:
                raise ConsulInternalError(msg)
        return base.Response(response.status, response.headers, data)

    def uri(self, path, params=None):
        return '{0}{1}{2}'.format(self.base_uri, path, params and '?' + urlencode(params) or '')

    def __getattr__(self, method):
        if method not in ('get', 'post', 'put', 'delete'):
            raise AttributeError("HTTPClient instance has no attribute '{0}'".format(method))

        def wrapper(callback, path, params=None, data=''):
            # python-consul doesn't allow specifying a ttl smaller than 10 seconds
            # because session_ttl_min defaults to 10s, so we have to do this ugly dirty hack...
            if method == 'put' and path == '/v1/session/create':
                ttl = '"ttl": "{0}s"'.format(self._ttl)
                if not data or data == '{}':
                    data = '{' + ttl + '}'
                else:
                    data = data[:-1] + ', ' + ttl + '}'
            if isinstance(params, list):  # starting from v1.1.0 python-consul switched from `dict` to `list` for params
                params = {k: v for k, v in params}
            kwargs = {'retries': 0, 'preload_content': False, 'body': data}
            if method == 'get' and isinstance(params, dict) and 'index' in params:
                timeout = float(params['wait'][:-1]) if 'wait' in params else 300
                # According to the documentation a small random amount of additional wait time is added to the
                # supplied maximum wait time to spread out the wake up time of any concurrent requests. This adds
                # up to wait / 16 additional time to the maximum duration. Since our goal is actually getting a
                # response rather than hitting the read timeout, we will add a slightly bigger value to the timeout.
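                # For illustration (hypothetical numbers): with the default wait of 300s
                # the effective read timeout below becomes 300 + max(300/15.0, 1) = 320s,
                # comfortably above the up-to-wait/16 (~18.75s) of jitter Consul may add.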
kwargs['timeout'] = timeout + max(timeout/15.0, 1) else: kwargs['timeout'] = self._read_timeout token = params.pop('token', self.token) if isinstance(params, dict) else self.token kwargs['headers'] = urllib3.make_headers(user_agent=USER_AGENT) if token: kwargs['headers']['X-Consul-Token'] = token return callback(self.response(self.http.request(method.upper(), self.uri(path, params), **kwargs))) return wrapper class ConsulClient(base.Consul): def __init__(self, *args, **kwargs): self._cert = kwargs.pop('cert', None) self._ca_cert = kwargs.pop('ca_cert', None) self.token = kwargs.get('token') super(ConsulClient, self).__init__(*args, **kwargs) def connect(self, *args, **kwargs): kwargs.update(dict(zip(['host', 'port', 'scheme', 'verify'], args))) if self._cert: kwargs['cert'] = self._cert if self._ca_cert: kwargs['ca_cert'] = self._ca_cert if self.token: kwargs['token'] = self.token return HTTPClient(**kwargs) def reload_config(self, config): self.http.token = self.token = config.get('token') self.consistency = config.get('consistency', 'default') self.dc = config.get('dc') def catch_consul_errors(func): def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except (RetryFailedError, ConsulException, HTTPException, HTTPError, socket.error, socket.timeout): return False return wrapper def force_if_last_failed(func): def wrapper(*args, **kwargs): if wrapper.last_result is False: kwargs['force'] = True wrapper.last_result = func(*args, **kwargs) return wrapper.last_result wrapper.last_result = None return wrapper def service_name_from_scope_name(scope_name): """Translate scope name to service name which can be used in dns. 230 = 253 - len('replica.') - len('.service.consul') """ def replace_char(match): c = match.group(0) return '-' if c in '. _' else "u{:04d}".format(ord(c)) service_name = re.sub(r'[^a-z0-9\-]', replace_char, scope_name.lower()) return service_name[0:230] class Consul(AbstractDCS): def __init__(self, config): super(Consul, self).__init__(config) self._scope = config['scope'] self._session = None self.__do_not_watch = False self._retry = Retry(deadline=config['retry_timeout'], max_delay=1, max_tries=-1, retry_exceptions=(ConsulInternalError, HTTPException, HTTPError, socket.error, socket.timeout)) kwargs = {} if 'url' in config: r = urlparse(config['url']) config.update({'scheme': r.scheme, 'host': r.hostname, 'port': r.port or 8500}) elif 'host' in config: host, port = split_host_port(config.get('host', '127.0.0.1:8500'), 8500) config['host'] = host if 'port' not in config: config['port'] = int(port) if config.get('cacert'): config['ca_cert'] = config.pop('cacert') if config.get('key') and config.get('cert'): config['cert'] = (config['cert'], config['key']) config_keys = ('host', 'port', 'token', 'scheme', 'cert', 'ca_cert', 'dc', 'consistency') kwargs = {p: config.get(p) for p in config_keys if config.get(p)} verify = config.get('verify') if not isinstance(verify, bool): verify = parse_bool(verify) if isinstance(verify, bool): kwargs['verify'] = verify self._client = ConsulClient(**kwargs) self.set_retry_timeout(config['retry_timeout']) self.set_ttl(config.get('ttl') or 30) self._last_session_refresh = 0 self.__session_checks = config.get('checks', []) self._register_service = config.get('register_service', False) if self._register_service: self._service_name = service_name_from_scope_name(self._scope) if self._scope != self._service_name: logger.warning('Using %s as consul service name instead of scope name %s', self._service_name, self._scope) 
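            # Illustrative (hypothetical) examples of the name mangling above: scope
            # 'my cluster' becomes service name 'my-cluster', while 'bat&man' becomes
            # 'batu0038man' (ord('&') == 38).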
self._service_check_interval = config.get('service_check_interval', '5s') if not self._ctl: self.create_session() def retry(self, *args, **kwargs): return self._retry.copy()(*args, **kwargs) def create_session(self): while not self._session: try: self.refresh_session() except ConsulError: logger.info('waiting on consul') time.sleep(5) def reload_config(self, config): super(Consul, self).reload_config(config) self._client.reload_config(config.get('consul', {})) def set_ttl(self, ttl): if self._client.http.set_ttl(ttl/2.0): # Consul multiplies the TTL by 2x self._session = None self.__do_not_watch = True @property def ttl(self): return self._client.http.ttl def set_retry_timeout(self, retry_timeout): self._retry.deadline = retry_timeout self._client.http.set_read_timeout(retry_timeout) def adjust_ttl(self): try: settings = self._client.agent.self() min_ttl = (settings['Config']['SessionTTLMin'] or 10000000000)/1000000000.0 logger.warning('Changing Session TTL from %s to %s', self._client.http.ttl, min_ttl) self._client.http.set_ttl(min_ttl) except Exception: logger.exception('adjust_ttl') def _do_refresh_session(self): """:returns: `!True` if it had to create new session""" if self._session and self._last_session_refresh + self._loop_wait > time.time(): return False if self._session: try: self._client.session.renew(self._session) except NotFound: self._session = None ret = not self._session if ret: try: self._session = self._client.session.create(name=self._scope + '-' + self._name, checks=self.__session_checks, lock_delay=0.001, behavior='delete') except InvalidSessionTTL: logger.exception('session.create') self.adjust_ttl() raise self._last_session_refresh = time.time() return ret def refresh_session(self): try: return self.retry(self._do_refresh_session) except (ConsulException, RetryFailedError): logger.exception('refresh_session') raise ConsulError('Failed to renew/create session') def client_path(self, path): return super(Consul, self).client_path(path)[1:] @staticmethod def member(node): return Member.from_node(node['ModifyIndex'], os.path.basename(node['Key']), node.get('Session'), node['Value']) def _load_cluster(self): try: path = self.client_path('/') _, results = self.retry(self._client.kv.get, path, recurse=True) if results is None: raise NotFound nodes = {} for node in results: node['Value'] = (node['Value'] or b'').decode('utf-8') nodes[node['Key'][len(path):].lstrip('/')] = node # get initialize flag initialize = nodes.get(self._INITIALIZE) initialize = initialize and initialize['Value'] # get global dynamic configuration config = nodes.get(self._CONFIG) config = config and ClusterConfig.from_node(config['ModifyIndex'], config['Value']) # get timeline history history = nodes.get(self._HISTORY) history = history and TimelineHistory.from_node(history['ModifyIndex'], history['Value']) # get last leader operation last_leader_operation = nodes.get(self._LEADER_OPTIME) last_leader_operation = 0 if last_leader_operation is None else int(last_leader_operation['Value']) # get list of members members = [self.member(n) for k, n in nodes.items() if k.startswith(self._MEMBERS) and k.count('/') == 1] # get leader leader = nodes.get(self._LEADER) if not self._ctl and leader and leader['Value'] == self._name \ and self._session != leader.get('Session', 'x'): logger.info('I am leader but not owner of the session. 
Removing leader node') self._client.kv.delete(self.leader_path, cas=leader['ModifyIndex']) leader = None if leader: member = Member(-1, leader['Value'], None, {}) member = ([m for m in members if m.name == leader['Value']] or [member])[0] leader = Leader(leader['ModifyIndex'], leader.get('Session'), member) # failover key failover = nodes.get(self._FAILOVER) if failover: failover = Failover.from_node(failover['ModifyIndex'], failover['Value']) # get synchronization state sync = nodes.get(self._SYNC) sync = SyncState.from_node(sync and sync['ModifyIndex'], sync and sync['Value']) return Cluster(initialize, config, leader, last_leader_operation, members, failover, sync, history) except NotFound: return Cluster(None, None, None, None, [], None, None, None) except Exception: logger.exception('get_cluster') raise ConsulError('Consul is not responding properly') @catch_consul_errors def touch_member(self, data, permanent=False): cluster = self.cluster member = cluster and cluster.get_member(self._name, fallback_to_leader=False) create_member = not permanent and self.refresh_session() if member and (create_member or member.session != self._session): self._client.kv.delete(self.member_path) create_member = True if not create_member and member and deep_compare(data, member.data): return True try: args = {} if permanent else {'acquire': self._session} self._client.kv.put(self.member_path, json.dumps(data, separators=(',', ':')), **args) if self._register_service: self.update_service(not create_member and member and member.data or {}, data) return True except InvalidSession: self._session = None logger.error('Our session disappeared from Consul, can not "touch_member"') except Exception: logger.exception('touch_member') return False @catch_consul_errors def register_service(self, service_name, **kwargs): logger.info('Register service %s, params %s', service_name, kwargs) return self._client.agent.service.register(service_name, **kwargs) @catch_consul_errors def deregister_service(self, service_id): logger.info('Deregister service %s', service_id) # service_id can contain special characters, but is used as part of uri in deregister request service_id = quote(service_id) return self._client.agent.service.deregister(service_id) def _update_service(self, data): service_name = self._service_name role = data['role'].replace('_', '-') state = data['state'] api_parts = urlparse(data['api_url']) api_parts = api_parts._replace(path='/{0}'.format(role)) conn_parts = urlparse(data['conn_url']) check = base.Check.http(api_parts.geturl(), self._service_check_interval, deregister='{0}s'.format(self._client.http.ttl * 10)) params = { 'service_id': '{0}/{1}'.format(self._scope, self._name), 'address': conn_parts.hostname, 'port': conn_parts.port, 'check': check, 'tags': [role] } if state == 'stopped': return self.deregister_service(params['service_id']) if role in ['master', 'replica', 'standby-leader']: if state != 'running': return return self.register_service(service_name, **params) logger.warning('Could not register service: unknown role type %s', role) @force_if_last_failed def update_service(self, old_data, new_data, force=False): update = False for key in ['role', 'api_url', 'conn_url', 'state']: if key not in new_data: logger.warning('Could not register service: not enough params in member data') return if old_data.get(key) != new_data[key]: update = True if force or update: return self._update_service(new_data) @catch_consul_errors def _do_attempt_to_acquire_leader(self, permanent): try: kwargs = {} if 
permanent else {'acquire': self._session} return self.retry(self._client.kv.put, self.leader_path, self._name, **kwargs) except InvalidSession: self._session = None logger.error('Our session disappeared from Consul. Will try to get a new one and retry attempt') self.refresh_session() return self.retry(self._client.kv.put, self.leader_path, self._name, acquire=self._session) def attempt_to_acquire_leader(self, permanent=False): if not self._session and not permanent: self.refresh_session() ret = self._do_attempt_to_acquire_leader(permanent) if not ret: logger.info('Could not take out TTL lock') return ret def take_leader(self): return self.attempt_to_acquire_leader() @catch_consul_errors def set_failover_value(self, value, index=None): return self._client.kv.put(self.failover_path, value, cas=index) @catch_consul_errors def set_config_value(self, value, index=None): return self._client.kv.put(self.config_path, value, cas=index) @catch_consul_errors def _write_leader_optime(self, last_operation): return self._client.kv.put(self.leader_optime_path, last_operation) @catch_consul_errors def _update_leader(self): if self._session: self.retry(self._client.session.renew, self._session) self._last_session_refresh = time.time() return bool(self._session) @catch_consul_errors def initialize(self, create_new=True, sysid=''): kwargs = {'cas': 0} if create_new else {} return self.retry(self._client.kv.put, self.initialize_path, sysid, **kwargs) @catch_consul_errors def cancel_initialization(self): return self.retry(self._client.kv.delete, self.initialize_path) @catch_consul_errors def delete_cluster(self): return self.retry(self._client.kv.delete, self.client_path(''), recurse=True) @catch_consul_errors def set_history_value(self, value): return self._client.kv.put(self.history_path, value) @catch_consul_errors def delete_leader(self): cluster = self.cluster if cluster and isinstance(cluster.leader, Leader) and cluster.leader.name == self._name: return self._client.kv.delete(self.leader_path, cas=cluster.leader.index) @catch_consul_errors def set_sync_state_value(self, value, index=None): return self.retry(self._client.kv.put, self.sync_path, value, cas=index) @catch_consul_errors def delete_sync_state(self, index=None): return self.retry(self._client.kv.delete, self.sync_path, cas=index) def watch(self, leader_index, timeout): self._last_session_refresh = 0 if self.__do_not_watch: self.__do_not_watch = False return True if leader_index: end_time = time.time() + timeout while timeout >= 1: try: idx, _ = self._client.kv.get(self.leader_path, index=leader_index, wait=str(timeout) + 's') return str(idx) != str(leader_index) except (ConsulException, HTTPException, HTTPError, socket.error, socket.timeout): logger.exception('watch') timeout = end_time - time.time() try: return super(Consul, self).watch(None, timeout) finally: self.event.clear() patroni-1.6.4/patroni/dcs/etcd.py000066400000000000000000000653561361356115100167350ustar00rootroot00000000000000from __future__ import absolute_import import etcd import json import logging import os import urllib3.util.connection import random import six import socket import time from dns.exception import DNSException from dns import resolver from urllib3.exceptions import HTTPError, ReadTimeoutError, ProtocolError from six.moves.queue import Queue from six.moves.http_client import HTTPException from six.moves.urllib_parse import urlparse from threading import Thread from . 
import AbstractDCS, Cluster, ClusterConfig, Failover, Leader, Member, SyncState, TimelineHistory from ..exceptions import DCSError from ..request import get as requests_get from ..utils import Retry, RetryFailedError, split_host_port, uri, USER_AGENT logger = logging.getLogger(__name__) class EtcdRaftInternal(etcd.EtcdException): """Raft Internal Error""" class EtcdError(DCSError): pass class DnsCachingResolver(Thread): def __init__(self, cache_time=600.0, cache_fail_time=30.0): super(DnsCachingResolver, self).__init__() self._cache = {} self._cache_time = cache_time self._cache_fail_time = cache_fail_time self._resolve_queue = Queue() self.daemon = True self.start() def run(self): while True: (host, port), attempt = self._resolve_queue.get() response = self._do_resolve(host, port) if response: self._cache[(host, port)] = (time.time(), response) else: if attempt < 10: self.resolve_async(host, port, attempt + 1) time.sleep(1) def resolve(self, host, port): current_time = time.time() cached_time, response = self._cache.get((host, port), (0, [])) time_passed = current_time - cached_time if time_passed > self._cache_time or (not response and time_passed > self._cache_fail_time): new_response = self._do_resolve(host, port) if new_response: self._cache[(host, port)] = (current_time, new_response) response = new_response return response def resolve_async(self, host, port, attempt=0): self._resolve_queue.put(((host, port), attempt)) @staticmethod def _do_resolve(host, port): try: return socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP) except Exception as e: logger.warning('failed to resolve host %s: %s', host, e) return [] class Client(etcd.Client): def __init__(self, config, dns_resolver, cache_ttl=300): self._dns_resolver = dns_resolver self.set_machines_cache_ttl(cache_ttl) self._machines_cache_updated = 0 args = {p: config.get(p) for p in ('host', 'port', 'protocol', 'use_proxies', 'username', 'password', 'cert', 'ca_cert') if config.get(p)} super(Client, self).__init__(read_timeout=config['retry_timeout'], **args) # For some reason python3-etcd on debian and ubuntu are not based on the latest version # Workaround for the case when https://github.com/jplana/python-etcd/pull/196 is not applied self.http.connection_pool_kw.pop('ssl_version', None) self._config = config self._load_machines_cache() self._allow_reconnect = True # allow passing retry argument to api_execute in params self._comparison_conditions.add('retry') self._read_options.add('retry') self._del_conditions.add('retry') def _calculate_timeouts(self, etcd_nodes=None, timeout=None): """Calculate a request timeout and number of retries per single etcd node. In case if the timeout per node is too small (less than one second) we will reduce the number of nodes. For the cluster with only one node we will try to do 2 retries. For clusters with 2 nodes we will try to do 1 retry for every node. No retries for clusters with 3 or more nodes. 
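        For example (illustrative numbers): with a 9 second deadline, a single-node
        cluster gets a per-node timeout of 3.0s with 2 retries, while a 3-node
        cluster gets 3.0s per node with no retries.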
        We better rely on switching to a different node."""

        etcd_nodes = etcd_nodes or len(self._machines_cache) + 1
        per_node_timeout = timeout = float(timeout or self.read_timeout)

        max_retries = 4 - min(etcd_nodes, 3)
        per_node_retries = 1
        min_timeout = 1.0

        while etcd_nodes > 0:
            per_node_timeout = float(timeout) / etcd_nodes
            if per_node_timeout >= min_timeout:
                # for small clusters we will try to do more than one try on every node
                while per_node_retries < max_retries and per_node_timeout / (per_node_retries + 1) >= min_timeout:
                    per_node_retries += 1
                per_node_timeout /= per_node_retries
                break
            # if the timeout per one node is too small, try to reduce the number of nodes
            etcd_nodes -= 1
            max_retries = 1

        return etcd_nodes, per_node_timeout, per_node_retries - 1

    def _get_headers(self):
        basic_auth = ':'.join((self.username, self.password)) if self.username and self.password else None
        return urllib3.make_headers(basic_auth=basic_auth, user_agent=USER_AGENT)

    def _build_request_parameters(self, timeout=None):
        kwargs = {'headers': self._get_headers(), 'redirect': self.allow_redirect}

        if timeout is not None:
            kwargs.update(retries=0, timeout=timeout)
        else:
            _, per_node_timeout, per_node_retries = self._calculate_timeouts()
            kwargs.update(timeout=per_node_timeout, retries=per_node_retries)
        return kwargs

    def set_machines_cache_ttl(self, cache_ttl):
        self._machines_cache_ttl = cache_ttl

    @property
    def machines(self):
        """The original `machines` method (property) of the `etcd.Client` class raises
        an exception when it fails to get the list of etcd cluster members. This method
        is called only when a request failed on one of the etcd members during an
        `api_execute` call. For us it's more important to execute the original request
        rather than to get the new topology of the etcd cluster, so we catch this
        exception and return an empty list of machines. Later, during the next
        `api_execute` call, we will forcefully update machines_cache.
        Also this method implements the same timeout-retry logic as `api_execute`,
        because the original method was retrying 2 times with the `read_timeout`
        on each node."""
        kwargs = self._build_request_parameters()

        while True:
            try:
                response = self.http.request(self._MGET, self._base_uri + self.version_prefix + '/machines', **kwargs)
                data = self._handle_server_response(response).data.decode('utf-8')
                machines = [m.strip() for m in data.split(',') if m.strip()]
                logger.debug("Retrieved list of machines: %s", machines)
                if not machines:
                    raise etcd.EtcdException
                random.shuffle(machines)
                for url in machines:
                    r = urlparse(url)
                    port = r.port or (443 if r.scheme == 'https' else 80)
                    self._dns_resolver.resolve_async(r.hostname, port)
                return machines
            except Exception as e:
                # We can't get the list of machines. If there is a server in the
                # machines cache, try it.
                logger.error("Failed to get list of machines from %s%s: %r", self._base_uri, self.version_prefix, e)
                if self._machines_cache:
                    self._base_uri = self._machines_cache.pop(0)
                    logger.info("Retrying on %s", self._base_uri)
                elif self._update_machines_cache:
                    raise etcd.EtcdException("Could not get the list of servers, "
                                             "maybe you provided the wrong "
                                             "host(s) to connect to?")
                else:
                    return []

    def set_read_timeout(self, timeout):
        self._read_timeout = timeout

    def _do_http_request(self, request_executor, method, url, fields=None, **kwargs):
        try:
            response = request_executor(method, url, fields=fields, **kwargs)
            response.data.decode('utf-8')
            self._check_cluster_id(response)
        except (HTTPError, HTTPException, socket.error, socket.timeout) as e:
            if (isinstance(fields, dict) and fields.get("wait") == "true" and
                    isinstance(e, (ReadTimeoutError, ProtocolError))):
                logger.debug("Watch timed out.")
                # switch to the next etcd node because we don't know exactly what happened,
                # whether the key didn't receive an update or there is a network problem.
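                # We push the current base_uri back onto the front of the machines cache,
                # so this node can be tried again later if the others also fail.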
self._machines_cache.insert(0, self._base_uri) self._base_uri = self._next_server() raise etcd.EtcdWatchTimedOut("Watch timed out: {0}".format(e), cause=e) logger.error("Request to server %s failed: %r", self._base_uri, e) logger.info("Reconnection allowed, looking for another server.") self._base_uri = self._next_server(cause=e) response = False return response def api_execute(self, path, method, params=None, timeout=None): if not path.startswith('/'): raise ValueError('Path does not start with /') retry = params.pop('retry', None) if isinstance(params, dict) else None kwargs = {'fields': params, 'preload_content': False} if method in [self._MGET, self._MDELETE]: request_executor = self.http.request elif method in [self._MPUT, self._MPOST]: request_executor = self.http.request_encode_body kwargs['encode_multipart'] = False else: raise etcd.EtcdException('HTTP method {0} not supported'.format(method)) # Update machines_cache if previous attempt of update has failed if self._update_machines_cache: self._load_machines_cache() elif not self._use_proxies and time.time() - self._machines_cache_updated > self._machines_cache_ttl: self._refresh_machines_cache() kwargs.update(self._build_request_parameters(timeout)) if retry: machines_cache = [self._base_uri] + self._machines_cache response = False while True: try: some_request_failed = False while not response: response = self._do_http_request(request_executor, method, self._base_uri + path, **kwargs) if response is False: if not retry: raise etcd.EtcdException('{0} {1} request failed'.format(method, path)) some_request_failed = True if some_request_failed: self._refresh_machines_cache() if response: break except etcd.EtcdConnectionFailed: if not retry: raise sleeptime = retry.sleeptime remaining_time = retry.stoptime - sleeptime - time.time() nodes, timeout, retries = self._calculate_timeouts(len(machines_cache), remaining_time) if nodes == 0: self._update_machines_cache = True raise retry.sleep_func(sleeptime) retry.update_delay() # We still have some time left. Partially restore `_machines_cache` and retry request kwargs.update(timeout=timeout, retries=retries) self._base_uri = machines_cache[0] self._machines_cache = machines_cache[1:nodes] return self._handle_server_response(response) @staticmethod def get_srv_record(host): try: return [(r.target.to_text(True), r.port) for r in resolver.query(host, 'SRV')] except DNSException: return [] def _get_machines_cache_from_srv(self, srv): """Fetch list of etcd-cluster member by resolving _etcd-server._tcp. SRV record. This record should contain list of host and peer ports which could be used to run 'GET http://{host}:{port}/members' request (peer protocol)""" ret = [] for r in ['-client-ssl', '-client', '-ssl', '', '-server-ssl', '-server']: protocol = 'https' if '-ssl' in r else 'http' endpoint = '/members' if '-server' in r else '' for host, port in self.get_srv_record('_etcd{0}._tcp.{1}'.format(r, srv)): url = uri(protocol, (host, port), endpoint) if endpoint: try: response = requests_get(url, timeout=self.read_timeout, verify=False) if response.status < 400: for member in json.loads(response.data.decode('utf-8')): ret.extend(member['clientURLs']) break except Exception: logger.exception('GET %s', url) else: ret.append(url) if ret: self._protocol = protocol break else: logger.warning('Can not resolve SRV for %s', srv) return list(set(ret)) def _get_machines_cache_from_dns(self, host, port): """One host might be resolved into multiple ip addresses. 
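        For example (hypothetical), a name like etcd.example.com backed by three
        A records yields three distinct client URLs.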
We will make list out of it""" if self.protocol == 'http': ret = map(lambda res: uri(self.protocol, res[-1][:2]), self._dns_resolver.resolve(host, port)) if ret: return list(set(ret)) return [uri(self.protocol, (host, port))] def _get_machines_cache_from_config(self): if 'proxy' in self._config: return [uri(self.protocol, (self._config['host'], self._config['port']))] machines_cache = [] if 'srv' in self._config: machines_cache = self._get_machines_cache_from_srv(self._config['srv']) if not machines_cache and 'hosts' in self._config: machines_cache = list(self._config['hosts']) if not machines_cache and 'host' in self._config: machines_cache = self._get_machines_cache_from_dns(self._config['host'], self._config['port']) return machines_cache def _load_machines_cache(self): """This method should fill up `_machines_cache` from scratch. It could happen only in two cases: 1. During class initialization 2. When all etcd members failed""" self._update_machines_cache = True if 'srv' not in self._config and 'host' not in self._config and 'hosts' not in self._config: raise Exception('Neither srv, hosts, host nor url are defined in etcd section of config') self._machines_cache = self._get_machines_cache_from_config() # Can not bootstrap list of etcd-cluster members, giving up if not self._machines_cache: raise etcd.EtcdException # After filling up initial list of machines_cache we should ask etcd-cluster about actual list self._base_uri = self._next_server() self._refresh_machines_cache() self._update_machines_cache = False def _refresh_machines_cache(self): self._machines_cache = self._get_machines_cache_from_config() if self._use_proxies else self.machines if self._base_uri in self._machines_cache: self._machines_cache.remove(self._base_uri) elif self._machines_cache: self._base_uri = self._next_server() self._machines_cache_updated = time.time() class Etcd(AbstractDCS): def __init__(self, config): super(Etcd, self).__init__(config) self._ttl = int(config.get('ttl') or 30) self._retry = Retry(deadline=config['retry_timeout'], max_delay=1, max_tries=-1, retry_exceptions=(etcd.EtcdLeaderElectionInProgress, EtcdRaftInternal)) self._client = self.get_etcd_client(config) self.__do_not_watch = False self._has_failed = False def retry(self, *args, **kwargs): retry = self._retry.copy() kwargs['retry'] = retry return retry(*args, **kwargs) def _handle_exception(self, e, name='', do_sleep=False, raise_ex=None): if not self._has_failed: logger.exception(name) else: logger.error(e) if do_sleep: time.sleep(1) self._has_failed = True if isinstance(raise_ex, Exception): raise raise_ex def catch_etcd_errors(func): def wrapper(self, *args, **kwargs): try: retval = func(self, *args, **kwargs) is not None self._has_failed = False return retval except (RetryFailedError, etcd.EtcdException) as e: self._handle_exception(e) return False except Exception as e: self._handle_exception(e, raise_ex=EtcdError('unexpected error')) return wrapper @staticmethod def get_etcd_client(config): if 'proxy' in config: config['use_proxies'] = True config['url'] = config['proxy'] if 'url' in config: r = urlparse(config['url']) config.update({'protocol': r.scheme, 'host': r.hostname, 'port': r.port or 2379, 'username': r.username, 'password': r.password}) elif 'hosts' in config: hosts = config.pop('hosts') default_port = config.pop('port', 2379) protocol = config.get('protocol', 'http') if isinstance(hosts, six.string_types): hosts = hosts.split(',') config['hosts'] = [] for value in hosts: if isinstance(value, six.string_types): 
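                    # e.g. (hypothetical entry) 'node1.example.com:2379' with protocol
                    # 'http' becomes 'http://node1.example.com:2379'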
config['hosts'].append(uri(protocol, split_host_port(value.strip(), default_port))) elif 'host' in config: host, port = split_host_port(config['host'], 2379) config['host'] = host if 'port' not in config: config['port'] = int(port) if config.get('cacert'): config['ca_cert'] = config.pop('cacert') if config.get('key') and config.get('cert'): config['cert'] = (config['cert'], config['key']) for p in ('discovery_srv', 'srv_domain'): if p in config: config['srv'] = config.pop(p) dns_resolver = DnsCachingResolver() def create_connection_patched(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, socket_options=None): host, port = address if host.startswith('['): host = host.strip('[]') err = None for af, socktype, proto, _, sa in dns_resolver.resolve(host, port): sock = None try: sock = socket.socket(af, socktype, proto) if socket_options: for opt in socket_options: sock.setsockopt(*opt) if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(timeout) if source_address: sock.bind(source_address) sock.connect(sa) return sock except socket.error as e: err = e if sock is not None: sock.close() sock = None if err is not None: raise err raise socket.error("getaddrinfo returns an empty list") urllib3.util.connection.create_connection = create_connection_patched client = None while not client: try: client = Client(config, dns_resolver) if 'use_proxies' in config and not client.machines: raise etcd.EtcdException except etcd.EtcdException: logger.info('waiting on etcd') time.sleep(5) return client def set_ttl(self, ttl): ttl = int(ttl) self.__do_not_watch = self._ttl != ttl self._ttl = ttl self._client.set_machines_cache_ttl(ttl*10) @property def ttl(self): return self._ttl def set_retry_timeout(self, retry_timeout): self._retry.deadline = retry_timeout self._client.set_read_timeout(retry_timeout) @staticmethod def member(node): return Member.from_node(node.modifiedIndex, os.path.basename(node.key), node.ttl, node.value) def _load_cluster(self): cluster = None try: result = self.retry(self._client.read, self.client_path(''), recursive=True) nodes = {node.key[len(result.key):].lstrip('/'): node for node in result.leaves} # get initialize flag initialize = nodes.get(self._INITIALIZE) initialize = initialize and initialize.value # get global dynamic configuration config = nodes.get(self._CONFIG) config = config and ClusterConfig.from_node(config.modifiedIndex, config.value) # get timeline history history = nodes.get(self._HISTORY) history = history and TimelineHistory.from_node(history.modifiedIndex, history.value) # get last leader operation last_leader_operation = nodes.get(self._LEADER_OPTIME) last_leader_operation = 0 if last_leader_operation is None else int(last_leader_operation.value) # get list of members members = [self.member(n) for k, n in nodes.items() if k.startswith(self._MEMBERS) and k.count('/') == 1] # get leader leader = nodes.get(self._LEADER) if leader: member = Member(-1, leader.value, None, {}) member = ([m for m in members if m.name == leader.value] or [member])[0] index = result.etcd_index if result.etcd_index > leader.modifiedIndex else leader.modifiedIndex + 1 leader = Leader(index, leader.ttl, member) # failover key failover = nodes.get(self._FAILOVER) if failover: failover = Failover.from_node(failover.modifiedIndex, failover.value) # get synchronization state sync = nodes.get(self._SYNC) sync = SyncState.from_node(sync and sync.modifiedIndex, sync and sync.value) cluster = Cluster(initialize, config, leader, last_leader_operation, members, failover, 
sync, history) except etcd.EtcdKeyNotFound: cluster = Cluster(None, None, None, None, [], None, None, None) except Exception as e: self._handle_exception(e, 'get_cluster', raise_ex=EtcdError('Etcd is not responding properly')) self._has_failed = False return cluster @catch_etcd_errors def touch_member(self, data, permanent=False): data = json.dumps(data, separators=(',', ':')) return self._client.set(self.member_path, data, None if permanent else self._ttl) @catch_etcd_errors def take_leader(self): return self.retry(self._client.write, self.leader_path, self._name, ttl=self._ttl) def attempt_to_acquire_leader(self, permanent=False): try: return bool(self.retry(self._client.write, self.leader_path, self._name, ttl=None if permanent else self._ttl, prevExist=False)) except etcd.EtcdAlreadyExist: logger.info('Could not take out TTL lock') except (RetryFailedError, etcd.EtcdException): pass return False @catch_etcd_errors def set_failover_value(self, value, index=None): return self._client.write(self.failover_path, value, prevIndex=index or 0) @catch_etcd_errors def set_config_value(self, value, index=None): return self._client.write(self.config_path, value, prevIndex=index or 0) @catch_etcd_errors def _write_leader_optime(self, last_operation): return self._client.set(self.leader_optime_path, last_operation) @catch_etcd_errors def _update_leader(self): return self.retry(self._client.write, self.leader_path, self._name, prevValue=self._name, ttl=self._ttl) @catch_etcd_errors def initialize(self, create_new=True, sysid=""): return self.retry(self._client.write, self.initialize_path, sysid, prevExist=(not create_new)) @catch_etcd_errors def delete_leader(self): return self._client.delete(self.leader_path, prevValue=self._name) @catch_etcd_errors def cancel_initialization(self): return self.retry(self._client.delete, self.initialize_path) @catch_etcd_errors def delete_cluster(self): return self.retry(self._client.delete, self.client_path(''), recursive=True) @catch_etcd_errors def set_history_value(self, value): return self._client.write(self.history_path, value) @catch_etcd_errors def set_sync_state_value(self, value, index=None): return self.retry(self._client.write, self.sync_path, value, prevIndex=index or 0) @catch_etcd_errors def delete_sync_state(self, index=None): return self.retry(self._client.delete, self.sync_path, prevIndex=index or 0) def watch(self, leader_index, timeout): if self.__do_not_watch: self.__do_not_watch = False return True if leader_index: end_time = time.time() + timeout while timeout >= 1: # when timeout is too small urllib3 doesn't have enough time to connect try: self._client.watch(self.leader_path, index=leader_index, timeout=timeout + 0.5) self._has_failed = False # Synchronous work of all cluster members with etcd is less expensive # than reestablishing http connection every time from every replica. 
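                    # Returning True reschedules the HA cycle right away, so the change
                    # that completed the watch is processed without waiting out loop_wait.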
return True except etcd.EtcdWatchTimedOut: self._client.http.clear() self._has_failed = False return False except (etcd.EtcdEventIndexCleared, etcd.EtcdWatcherCleared): # Watch failed self._has_failed = False return True # leave the loop, because watch with the same parameters will fail anyway except etcd.EtcdException as e: self._handle_exception(e, 'watch', True) timeout = end_time - time.time() try: return super(Etcd, self).watch(None, timeout) finally: self.event.clear() etcd.EtcdError.error_exceptions[300] = EtcdRaftInternal patroni-1.6.4/patroni/dcs/exhibitor.py000066400000000000000000000051321361356115100177750ustar00rootroot00000000000000import json import logging import random import time from patroni.dcs.zookeeper import ZooKeeper from patroni.request import get as requests_get from patroni.utils import uri logger = logging.getLogger(__name__) class ExhibitorEnsembleProvider(object): TIMEOUT = 3.1 def __init__(self, hosts, port, uri_path='/exhibitor/v1/cluster/list', poll_interval=300): self._exhibitor_port = port self._uri_path = uri_path self._poll_interval = poll_interval self._exhibitors = hosts self._master_exhibitors = hosts self._zookeeper_hosts = '' self._next_poll = None while not self.poll(): logger.info('waiting on exhibitor') time.sleep(5) def poll(self): if self._next_poll and self._next_poll > time.time(): return False json = self._query_exhibitors(self._exhibitors) if not json: json = self._query_exhibitors(self._master_exhibitors) if isinstance(json, dict) and 'servers' in json and 'port' in json: self._next_poll = time.time() + self._poll_interval zookeeper_hosts = ','.join([h + ':' + str(json['port']) for h in sorted(json['servers'])]) if self._zookeeper_hosts != zookeeper_hosts: logger.info('ZooKeeper connection string has changed: %s => %s', self._zookeeper_hosts, zookeeper_hosts) self._zookeeper_hosts = zookeeper_hosts self._exhibitors = json['servers'] return True return False def _query_exhibitors(self, exhibitors): random.shuffle(exhibitors) for host in exhibitors: try: response = requests_get(uri('http', (host, self._exhibitor_port), self._uri_path), timeout=self.TIMEOUT) return json.loads(response.data.decode('utf-8')) except Exception: logging.debug('Request to %s failed', host) return None @property def zookeeper_hosts(self): return self._zookeeper_hosts class Exhibitor(ZooKeeper): def __init__(self, config): interval = config.get('poll_interval', 300) self._ensemble_provider = ExhibitorEnsembleProvider(config['hosts'], config['port'], poll_interval=interval) config = config.copy() config['hosts'] = self._ensemble_provider.zookeeper_hosts super(Exhibitor, self).__init__(config) def _load_cluster(self): if self._ensemble_provider.poll(): self._client.set_hosts(self._ensemble_provider.zookeeper_hosts) return super(Exhibitor, self)._load_cluster() patroni-1.6.4/patroni/dcs/kubernetes.py000066400000000000000000000622051361356115100201530ustar00rootroot00000000000000from __future__ import absolute_import import datetime import functools import json import logging import socket import sys import time from kubernetes import client as k8s_client, config as k8s_config, watch as k8s_watch from urllib3 import Timeout from urllib3.exceptions import HTTPError from six.moves.http_client import HTTPException from threading import Condition, Lock, Thread from . 
import AbstractDCS, Cluster, ClusterConfig, Failover, Leader, Member, SyncState, TimelineHistory
from ..exceptions import DCSError
from ..utils import deep_compare, Retry, RetryFailedError, tzutc, USER_AGENT

logger = logging.getLogger(__name__)


class KubernetesError(DCSError):
    pass


class KubernetesRetriableException(k8s_client.rest.ApiException):

    def __init__(self, orig):
        super(KubernetesRetriableException, self).__init__(orig.status, orig.reason)
        self.body = orig.body
        self.headers = orig.headers


class CoreV1ApiProxy(object):

    def __init__(self, use_endpoints=False):
        self._api = k8s_client.CoreV1Api()
        self._api.api_client.user_agent = USER_AGENT
        self._api.api_client.rest_client.pool_manager.connection_pool_kw['maxsize'] = 10
        self._request_timeout = None
        self._use_endpoints = use_endpoints

    def configure_timeouts(self, loop_wait, retry_timeout, ttl):
        # Normally every loop_wait seconds we should have received something from the socket.
        # If we didn't receive anything after loop_wait + retry_timeout seconds, it is time
        # to start worrying (send keepalive messages). Finally, the connection should be
        # considered dead if we received nothing from the socket after ttl seconds.
        cnt = 3
        idle = int(loop_wait + retry_timeout)
        intvl = max(1, int(float(ttl - idle) / cnt))
        self._api.api_client.rest_client.pool_manager.connection_pool_kw['socket_options'] = [
            (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
            (socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, idle),
            (socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, intvl),
            (socket.IPPROTO_TCP, socket.TCP_KEEPCNT, cnt),
            (socket.IPPROTO_TCP, 18, int(ttl * 1000))  # TCP_USER_TIMEOUT
        ]
        self._request_timeout = (1, retry_timeout / 3.0)

    def __getattr__(self, func):
        if func.endswith('_kind'):
            func = func[:-4] + ('endpoints' if self._use_endpoints else 'config_map')

        def wrapper(*args, **kwargs):
            if '_request_timeout' not in kwargs:
                kwargs['_request_timeout'] = self._request_timeout
            try:
                return getattr(self._api, func)(*args, **kwargs)
            except k8s_client.rest.ApiException as e:
                if e.status in (502, 503, 504):  # XXX
                    raise KubernetesRetriableException(e)
                raise
        return wrapper


def catch_kubernetes_errors(func):
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except k8s_client.rest.ApiException as e:
            if e.status == 403:
                logger.exception('Permission denied')
            elif e.status != 409:  # Object exists or conflict in resource_version
                logger.exception('Unexpected error from Kubernetes API')
            return False
        except (RetryFailedError, HTTPException, HTTPError, socket.error, socket.timeout):
            return False
    return wrapper


class ObjectCache(Thread):

    def __init__(self, dcs, func, retry, condition):
        Thread.__init__(self)
        self.daemon = True
        self._api_client = k8s_client.ApiClient()
        self._dcs = dcs
        self._func = func
        self._retry = retry
        self._condition = condition
        self._is_ready = False
        self._object_cache = {}
        self._object_cache_lock = Lock()
        self._annotations_map = {self._dcs.leader_path: self._dcs._LEADER, self._dcs.config_path: self._dcs._CONFIG}
        self.start()

    def _list(self):
        return self._func(_request_timeout=(self._retry.deadline, Timeout.DEFAULT_TIMEOUT))

    def _watch(self, resource_version):
        return self._func(_request_timeout=(self._retry.deadline, Timeout.DEFAULT_TIMEOUT),
                          _preload_content=False, watch=True, resource_version=resource_version)

    def set(self, name, value):
        with self._object_cache_lock:
            old_value = self._object_cache.get(name)
            ret = not old_value or int(old_value.metadata.resource_version) < int(value.metadata.resource_version)
            if ret:
                self._object_cache[name] = value
        return ret,
old_value def delete(self, name, resource_version): with self._object_cache_lock: old_value = self._object_cache.get(name) ret = old_value and int(old_value.metadata.resource_version) < int(resource_version) if ret: del self._object_cache[name] return not old_value or ret, old_value def copy(self): with self._object_cache_lock: return self._object_cache.copy() def _build_cache(self): objects = self._list() return_type = 'V1' + objects.kind[:-4] with self._object_cache_lock: self._object_cache = {item.metadata.name: item for item in objects.items} with self._condition: self._is_ready = True self._condition.notify() response = self._watch(objects.metadata.resource_version) try: for line in k8s_watch.watch.iter_resp_lines(response): event = json.loads(line) obj = event['object'] if obj.get('code') == 410: break ev_type = event['type'] name = obj['metadata']['name'] if ev_type in ('ADDED', 'MODIFIED'): obj = k8s_watch.watch.SimpleNamespace(data=json.dumps(obj)) obj = self._api_client.deserialize(obj, return_type) success, old_value = self.set(name, obj) if success: new_value = (obj.metadata.annotations or {}).get(self._annotations_map.get(name)) elif ev_type == 'DELETED': success, old_value = self.delete(name, obj['metadata']['resourceVersion']) new_value = None else: logger.warning('Unexpected event type: %s', ev_type) continue if success and return_type != 'V1Pod': if old_value: old_value = (old_value.metadata.annotations or {}).get(self._annotations_map.get(name)) if old_value != new_value and \ (name != self._dcs.config_path or old_value is not None and new_value is not None): logger.debug('%s changed from %s to %s', name, old_value, new_value) self._dcs.event.set() finally: with self._condition: self._is_ready = False response.close() response.release_conn() def run(self): while True: try: self._build_cache() except Exception as e: with self._condition: self._is_ready = False logger.error('ObjectCache.run %r', e) def is_ready(self): """Must be called only when holding the lock on `_condition`""" return self._is_ready class Kubernetes(AbstractDCS): def __init__(self, config): self._labels = config['labels'] self._labels[config.get('scope_label', 'cluster-name')] = config['scope'] self._label_selector = ','.join('{0}={1}'.format(k, v) for k, v in self._labels.items()) self._namespace = config.get('namespace') or 'default' self._role_label = config.get('role_label', 'role') config['namespace'] = '' super(Kubernetes, self).__init__(config) self._retry = Retry(deadline=config['retry_timeout'], max_delay=1, max_tries=-1, retry_exceptions=(KubernetesRetriableException, HTTPException, HTTPError, socket.error, socket.timeout)) self._ttl = None try: k8s_config.load_incluster_config() except k8s_config.ConfigException: k8s_config.load_kube_config(context=config.get('context', 'local')) self.__subsets = None use_endpoints = config.get('use_endpoints') and (config.get('patronictl') or 'pod_ip' in config) if use_endpoints: addresses = [k8s_client.V1EndpointAddress(ip='127.0.0.1' if config.get('patronictl') else config['pod_ip'])] ports = [] for p in config.get('ports', [{}]): port = {'port': int(p.get('port', '5432'))} port.update({n: p[n] for n in ('name', 'protocol') if p.get(n)}) ports.append(k8s_client.V1EndpointPort(**port)) self.__subsets = [k8s_client.V1EndpointSubset(addresses=addresses, ports=ports)] self._should_create_config_service = True self._api = CoreV1ApiProxy(use_endpoints) self.reload_config(config) self._leader_observed_record = {} self._leader_observed_time = None 
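        # The resource_version values remembered below allow CAS-style updates:
        # patches are submitted against the last observed version and the API
        # server rejects them with a 409 conflict if somebody else won the race.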
self._leader_resource_version = None self._leader_observed_subsets = [] self._config_resource_version = None self.__do_not_watch = False self._condition = Condition() pods_func = functools.partial(self._api.list_namespaced_pod, self._namespace, label_selector=self._label_selector) self._pods = ObjectCache(self, pods_func, self._retry, self._condition) kinds_func = functools.partial(self._api.list_namespaced_kind, self._namespace, label_selector=self._label_selector) self._kinds = ObjectCache(self, kinds_func, self._retry, self._condition) def retry(self, *args, **kwargs): return self._retry.copy()(*args, **kwargs) def client_path(self, path): return super(Kubernetes, self).client_path(path)[1:].replace('/', '-') @property def leader_path(self): return self._base_path[1:] if self.__subsets else super(Kubernetes, self).leader_path def set_ttl(self, ttl): ttl = int(ttl) self.__do_not_watch = self._ttl != ttl self._ttl = ttl @property def ttl(self): return self._ttl def set_retry_timeout(self, retry_timeout): self._retry.deadline = retry_timeout def reload_config(self, config): super(Kubernetes, self).reload_config(config) self._api.configure_timeouts(self.loop_wait, self._retry.deadline, self.ttl) @staticmethod def member(pod): annotations = pod.metadata.annotations or {} member = Member.from_node(pod.metadata.resource_version, pod.metadata.name, None, annotations.get('status', '')) member.data['pod_labels'] = pod.metadata.labels return member def _wait_caches(self): stop_time = time.time() + self._retry.deadline while not (self._pods.is_ready() and self._kinds.is_ready()): timeout = stop_time - time.time() if timeout <= 0: raise RetryFailedError('Exceeded retry deadline') self._condition.wait(timeout) def _load_cluster(self): try: with self._condition: self._wait_caches() members = [self.member(pod) for pod in self._pods.copy().values()] nodes = self._kinds.copy() config = nodes.get(self.config_path) metadata = config and config.metadata self._config_resource_version = metadata.resource_version if metadata else None annotations = metadata and metadata.annotations or {} # get initialize flag initialize = annotations.get(self._INITIALIZE) # get global dynamic configuration config = ClusterConfig.from_node(metadata and metadata.resource_version, annotations.get(self._CONFIG) or '{}', metadata.resource_version if self._CONFIG in annotations else 0) # get timeline history history = TimelineHistory.from_node(metadata and metadata.resource_version, annotations.get(self._HISTORY) or '[]') leader = nodes.get(self.leader_path) metadata = leader and leader.metadata self._leader_resource_version = metadata.resource_version if metadata else None self._leader_observed_subsets = leader.subsets if self.__subsets and leader and leader.subsets else [] annotations = metadata and metadata.annotations or {} # get last leader operation last_leader_operation = annotations.get(self._OPTIME) last_leader_operation = 0 if last_leader_operation is None else int(last_leader_operation) # get leader leader_record = {n: annotations.get(n) for n in (self._LEADER, 'acquireTime', 'ttl', 'renewTime', 'transitions') if n in annotations} if (leader_record or self._leader_observed_record) and leader_record != self._leader_observed_record: self._leader_observed_record = leader_record self._leader_observed_time = time.time() leader = leader_record.get(self._LEADER) try: ttl = int(leader_record.get('ttl')) or self._ttl except (TypeError, ValueError): ttl = self._ttl if not metadata or not self._leader_observed_time or 
self._leader_observed_time + ttl < time.time(): leader = None if metadata: member = Member(-1, leader, None, {}) member = ([m for m in members if m.name == leader] or [member])[0] leader = Leader(metadata.resource_version, None, member) # failover key failover = nodes.get(self.failover_path) metadata = failover and failover.metadata failover = Failover.from_node(metadata and metadata.resource_version, metadata and (metadata.annotations or {}).copy()) # get synchronization state sync = nodes.get(self.sync_path) metadata = sync and sync.metadata sync = SyncState.from_node(metadata and metadata.resource_version, metadata and metadata.annotations) return Cluster(initialize, config, leader, last_leader_operation, members, failover, sync, history) except Exception: logger.exception('get_cluster') raise KubernetesError('Kubernetes API is not responding properly') @staticmethod def compare_ports(p1, p2): return p1.name == p2.name and p1.port == p2.port and (p1.protocol or 'TCP') == (p2.protocol or 'TCP') @staticmethod def subsets_changed(last_observed_subsets, subsets): """ >>> Kubernetes.subsets_changed([], []) False >>> Kubernetes.subsets_changed([], [k8s_client.V1EndpointSubset()]) True >>> s1 = [k8s_client.V1EndpointSubset(addresses=[k8s_client.V1EndpointAddress(ip='1.2.3.4')])] >>> s2 = [k8s_client.V1EndpointSubset(addresses=[k8s_client.V1EndpointAddress(ip='1.2.3.5')])] >>> Kubernetes.subsets_changed(s1, s2) True >>> a = [k8s_client.V1EndpointAddress(ip='1.2.3.4')] >>> s1 = [k8s_client.V1EndpointSubset(addresses=a, ports=[k8s_client.V1EndpointPort(protocol='TCP', port=1)])] >>> s2 = [k8s_client.V1EndpointSubset(addresses=a, ports=[k8s_client.V1EndpointPort(port=5432)])] >>> Kubernetes.subsets_changed(s1, s2) True >>> p1 = k8s_client.V1EndpointPort(name='port1', port=1) >>> p2 = k8s_client.V1EndpointPort(name='port2', port=2) >>> p3 = k8s_client.V1EndpointPort(name='port3', port=3) >>> s1 = [k8s_client.V1EndpointSubset(addresses=a, ports=[p1, p2])] >>> s2 = [k8s_client.V1EndpointSubset(addresses=a, ports=[p2, p3])] >>> Kubernetes.subsets_changed(s1, s2) True >>> s2 = [k8s_client.V1EndpointSubset(addresses=a, ports=[p2, p1])] >>> Kubernetes.subsets_changed(s1, s2) False """ if len(last_observed_subsets) != len(subsets): return True if subsets == []: return False if len(last_observed_subsets[0].addresses or []) != 1 or \ last_observed_subsets[0].addresses[0].ip != subsets[0].addresses[0].ip or \ len(last_observed_subsets[0].ports) != len(subsets[0].ports): return True if len(subsets[0].ports) == 1: return not Kubernetes.compare_ports(last_observed_subsets[0].ports[0], subsets[0].ports[0]) observed_ports = {p.name: p for p in last_observed_subsets[0].ports} for p in subsets[0].ports: if p.name not in observed_ports or not Kubernetes.compare_ports(p, observed_ports.pop(p.name)): return True return False @catch_kubernetes_errors def patch_or_create(self, name, annotations, resource_version=None, patch=False, retry=True, subsets=None): metadata = {'namespace': self._namespace, 'name': name, 'labels': self._labels, 'annotations': annotations} if patch or resource_version: if resource_version is not None: metadata['resource_version'] = resource_version func = functools.partial(self._api.patch_namespaced_kind, name) else: func = functools.partial(self._api.create_namespaced_kind) # skip annotations with null values metadata['annotations'] = {k: v for k, v in metadata['annotations'].items() if v is not None} metadata = k8s_client.V1ObjectMeta(**metadata) if subsets is not None and self.__subsets: 
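            # When use_endpoints is enabled the leader key lives on an Endpoints
            # object (which carries subsets); everything else is a ConfigMap.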
endpoints = {'metadata': metadata} if self.subsets_changed(self._leader_observed_subsets, subsets): endpoints['subsets'] = subsets body = k8s_client.V1Endpoints(**endpoints) else: body = k8s_client.V1ConfigMap(metadata=metadata) ret = self.retry(func, self._namespace, body) if retry else func(self._namespace, body) if ret: self._kinds.set(name, ret) return ret def patch_or_create_config(self, annotations, resource_version=None, patch=False, retry=True): # SCOPE-config endpoint requires corresponding service otherwise it might be "cleaned" by k8s master if self.__subsets and not patch and not resource_version: self._should_create_config_service = True self._create_config_service() ret = self.patch_or_create(self.config_path, annotations, resource_version, patch, retry) if ret: self._config_resource_version = ret.metadata.resource_version return ret def _create_config_service(self): metadata = k8s_client.V1ObjectMeta(namespace=self._namespace, name=self.config_path, labels=self._labels) body = k8s_client.V1Service(metadata=metadata, spec=k8s_client.V1ServiceSpec(cluster_ip='None')) try: if not self._api.create_namespaced_service(self._namespace, body): return except Exception as e: if not isinstance(e, k8s_client.rest.ApiException) or e.status != 409: # Service already exists return logger.exception('create_config_service failed') self._should_create_config_service = False def _write_leader_optime(self, last_operation): """Unused""" def _update_leader(self): """Unused""" def update_leader(self, last_operation, access_is_restricted=False): now = datetime.datetime.now(tzutc).isoformat() annotations = {self._LEADER: self._name, 'ttl': str(self._ttl), 'renewTime': now, 'acquireTime': self._leader_observed_record.get('acquireTime') or now, 'transitions': self._leader_observed_record.get('transitions') or '0'} if last_operation: annotations[self._OPTIME] = last_operation subsets = [] if access_is_restricted else self.__subsets ret = self.patch_or_create(self.leader_path, annotations, self._leader_resource_version, subsets=subsets) if ret: self._leader_resource_version = ret.metadata.resource_version return ret def attempt_to_acquire_leader(self, permanent=False): now = datetime.datetime.now(tzutc).isoformat() annotations = {self._LEADER: self._name, 'ttl': str(sys.maxsize if permanent else self._ttl), 'renewTime': now, 'acquireTime': now, 'transitions': '0'} if self._leader_observed_record: try: transitions = int(self._leader_observed_record.get('transitions')) except (TypeError, ValueError): transitions = 0 if self._leader_observed_record.get(self._LEADER) != self._name: transitions += 1 else: annotations['acquireTime'] = self._leader_observed_record.get('acquireTime') or now annotations['transitions'] = str(transitions) subsets = [] if self.__subsets else None ret = self.patch_or_create(self.leader_path, annotations, self._leader_resource_version, subsets=subsets) if ret: self._leader_resource_version = ret.metadata.resource_version else: logger.info('Could not take out TTL lock') return ret def take_leader(self): return self.attempt_to_acquire_leader() def set_failover_value(self, value, index=None): """Unused""" def manual_failover(self, leader, candidate, scheduled_at=None, index=None): annotations = {'leader': leader or None, 'member': candidate or None, 'scheduled_at': scheduled_at and scheduled_at.isoformat()} patch = bool(self.cluster and isinstance(self.cluster.failover, Failover) and self.cluster.failover.index) return self.patch_or_create(self.failover_path, annotations, index, 
bool(index or patch), False) def set_config_value(self, value, index=None): return self.patch_or_create_config({self._CONFIG: value}, index, bool(self._config_resource_version), False) @catch_kubernetes_errors def touch_member(self, data, permanent=False): cluster = self.cluster if cluster and cluster.leader and cluster.leader.name == self._name: role = 'promoted' if data['role'] in ('replica', 'promoted') else 'master' elif data['state'] == 'running' and data['role'] != 'master': role = data['role'] else: role = None member = cluster and cluster.get_member(self._name, fallback_to_leader=False) pod_labels = member and member.data.pop('pod_labels', None) ret = pod_labels is not None and pod_labels.get(self._role_label) == role and deep_compare(data, member.data) if not ret: metadata = {'namespace': self._namespace, 'name': self._name, 'labels': {self._role_label: role}, 'annotations': {'status': json.dumps(data, separators=(',', ':'))}} body = k8s_client.V1Pod(metadata=k8s_client.V1ObjectMeta(**metadata)) ret = self._api.patch_namespaced_pod(self._name, self._namespace, body) if self.__subsets and self._should_create_config_service: self._create_config_service() return ret def initialize(self, create_new=True, sysid=""): cluster = self.cluster resource_version = cluster.config.index if cluster and cluster.config and cluster.config.index else None return self.patch_or_create_config({self._INITIALIZE: sysid}, resource_version) def delete_leader(self): if self.cluster and isinstance(self.cluster.leader, Leader) and self.cluster.leader.name == self._name: self.patch_or_create(self.leader_path, {self._LEADER: None}, self._leader_resource_version, True, False, []) self.reset_cluster() def cancel_initialization(self): self.patch_or_create_config({self._INITIALIZE: None}, self._config_resource_version, True) @catch_kubernetes_errors def delete_cluster(self): self.retry(self._api.delete_collection_namespaced_kind, self._namespace, label_selector=self._label_selector) def set_history_value(self, value): return self.patch_or_create_config({self._HISTORY: value}, None, bool(self._config_resource_version), False) def set_sync_state_value(self, value, index=None): """Unused""" def write_sync_state(self, leader, sync_standby, index=None): return self.patch_or_create(self.sync_path, self.sync_state(leader, sync_standby), index, False) def delete_sync_state(self, index=None): return self.write_sync_state(None, None, index) def watch(self, leader_index, timeout): if self.__do_not_watch: self.__do_not_watch = False return True try: return super(Kubernetes, self).watch(None, timeout) finally: self.event.clear() patroni-1.6.4/patroni/dcs/zookeeper.py import json import logging import select import time from kazoo.client import KazooClient, KazooState, KazooRetry from kazoo.exceptions import NoNodeError, NodeExistsError from kazoo.handlers.threading import SequentialThreadingHandler from patroni.dcs import AbstractDCS, ClusterConfig, Cluster, Failover, Leader, Member, SyncState, TimelineHistory from patroni.exceptions import DCSError from patroni.utils import deep_compare logger = logging.getLogger(__name__) class ZooKeeperError(DCSError): pass class PatroniSequentialThreadingHandler(SequentialThreadingHandler): def __init__(self, connect_timeout): super(PatroniSequentialThreadingHandler, self).__init__() self.set_connect_timeout(connect_timeout) def set_connect_timeout(self, connect_timeout): self._connect_timeout = max(1.0, 
connect_timeout/2.0) # try to connect to zookeeper node during loop_wait/2 def create_connection(self, *args, **kwargs): """This method tries to establish a connection with one of the zookeeper nodes. Somehow the strategy "fail earlier and retry more often" works much better compared to the original strategy "try to connect with the specified timeout". Since we want to try connecting to zookeeper more often (with the smaller connect_timeout), we have to override the `create_connection` method in the `SequentialThreadingHandler` class (which is used by `kazoo.Client`). :param args: always contains `tuple(host, port)` as the first element and could contain `connect_timeout` (negotiated session timeout) as the second element.""" args = list(args) if len(args) == 0: # kazoo 2.6.0 slightly changed the way it calls the create_connection method kwargs['timeout'] = max(self._connect_timeout, kwargs.get('timeout', self._connect_timeout*10)/10.0) elif len(args) == 1: args.append(self._connect_timeout) else: args[1] = max(self._connect_timeout, args[1]/10.0) return super(PatroniSequentialThreadingHandler, self).create_connection(*args, **kwargs) def select(self, *args, **kwargs): """Python3 raises `ValueError` if socket is closed, because fd == -1""" try: return super(PatroniSequentialThreadingHandler, self).select(*args, **kwargs) except ValueError as e: raise select.error(9, str(e)) class ZooKeeper(AbstractDCS): def __init__(self, config): super(ZooKeeper, self).__init__(config) hosts = config.get('hosts', []) if isinstance(hosts, list): hosts = ','.join(hosts) self._client = KazooClient(hosts, handler=PatroniSequentialThreadingHandler(config['retry_timeout']), timeout=config['ttl'], connection_retry=KazooRetry(max_delay=1, max_tries=-1, sleep_func=time.sleep), command_retry=KazooRetry(deadline=config['retry_timeout'], max_delay=1, max_tries=-1, sleep_func=time.sleep)) self._client.add_listener(self.session_listener) self._fetch_cluster = True self._orig_kazoo_connect = self._client._connection._connect self._client._connection._connect = self._kazoo_connect self._client.start() def _kazoo_connect(self, host, port): """Kazoo uses pings to determine the health of the connection to zookeeper. If there is no response to a ping within the ping interval (1/2 of read_timeout) it will consider the current connection dead and try to connect to another node. Without this "magic" it was taking up to 2/3 of the session timeout (ttl) to figure out that the connection was dead, and we had only a small amount of time left to reconnect and retry. This method is needed to return a different value of read_timeout, which is calculated not from the negotiated session timeout but from the value of `loop_wait`. And it is 2 sec smaller than loop_wait, because we can spend up to 2 seconds when calling `touch_member()` and `write_leader_optime()` methods, which also may hang...""" ret = self._orig_kazoo_connect(host, port) return max(self.loop_wait - 2, 2)*1000, ret[1] def session_listener(self, state): if state in [KazooState.SUSPENDED, KazooState.LOST]: self.cluster_watcher(None) def cluster_watcher(self, event): self._fetch_cluster = True self.event.set() def reload_config(self, config): self.set_retry_timeout(config['retry_timeout']) loop_wait = config['loop_wait'] loop_wait_changed = self._loop_wait != loop_wait self._loop_wait = loop_wait self._client.handler.set_connect_timeout(loop_wait) # We need to reestablish connection to zookeeper if we want to change # read_timeout (and Ping interval respectively), because read_timeout # is calculated in `_kazoo_connect` method. 
If we are changing ttl at # the same time, set_ttl method will reestablish connection and return # `!True`, otherwise we will close existing connection and let kazoo # open the new one. if not self.set_ttl(int(config['ttl'] * 1000)) and loop_wait_changed: self._client._connection._socket.close() def set_ttl(self, ttl): """It is not possible to change ttl (session_timeout) in zookeeper without destroying old session and creating the new one. This method returns `!True` if session_timeout has been changed (`restart()` has been called).""" if self._client._session_timeout != ttl: self._client._session_timeout = ttl self._client.restart() return True @property def ttl(self): return self._client._session_timeout def set_retry_timeout(self, retry_timeout): retry = self._client.retry if isinstance(self._client.retry, KazooRetry) else self._client._retry retry.deadline = retry_timeout def get_node(self, key, watch=None): try: ret = self._client.get(key, watch) return (ret[0].decode('utf-8'), ret[1]) except NoNodeError: return None @staticmethod def member(name, value, znode): return Member.from_node(znode.version, name, znode.ephemeralOwner, value) def get_children(self, key, watch=None): try: return self._client.get_children(key, watch) except NoNodeError: return [] def load_members(self, sync_standby): members = [] for member in self.get_children(self.members_path, self.cluster_watcher): watch = member == sync_standby and self.cluster_watcher or None data = self.get_node(self.members_path + member, watch) if data is not None: members.append(self.member(member, *data)) return members def _inner_load_cluster(self): self._fetch_cluster = False self.event.clear() nodes = set(self.get_children(self.client_path(''), self.cluster_watcher)) if not nodes: self._fetch_cluster = True # get initialize flag initialize = (self.get_node(self.initialize_path) or [None])[0] if self._INITIALIZE in nodes else None # get global dynamic configuration config = self.get_node(self.config_path, watch=self.cluster_watcher) if self._CONFIG in nodes else None config = config and ClusterConfig.from_node(config[1].version, config[0], config[1].mzxid) # get timeline history history = self.get_node(self.history_path, watch=self.cluster_watcher) if self._HISTORY in nodes else None history = history and TimelineHistory.from_node(history[1].mzxid, history[0]) # get last leader operation last_leader_operation = self._OPTIME in nodes and self._fetch_cluster and self.get_node(self.leader_optime_path) last_leader_operation = last_leader_operation and int(last_leader_operation[0]) or 0 # get synchronization state sync = self.get_node(self.sync_path, watch=self.cluster_watcher) if self._SYNC in nodes else None sync = SyncState.from_node(sync and sync[1].version, sync and sync[0]) # get list of members sync_standby = sync.leader == self._name and sync.sync_standby or None members = self.load_members(sync_standby) if self._MEMBERS[:-1] in nodes else [] # get leader leader = self.get_node(self.leader_path) if self._LEADER in nodes else None if leader: client_id = self._client.client_id if not self._ctl and leader[0] == self._name and client_id is not None \ and client_id[0] != leader[1].ephemeralOwner: logger.info('I am leader but not owner of the session. 
Removing leader node') self._client.delete(self.leader_path) leader = None if leader: member = Member(-1, leader[0], None, {}) member = ([m for m in members if m.name == leader[0]] or [member])[0] leader = Leader(leader[1].version, leader[1].ephemeralOwner, member) self._fetch_cluster = member.index == -1 # failover key failover = self.get_node(self.failover_path, watch=self.cluster_watcher) if self._FAILOVER in nodes else None failover = failover and Failover.from_node(failover[1].version, failover[0]) return Cluster(initialize, config, leader, last_leader_operation, members, failover, sync, history) def _load_cluster(self): cluster = self.cluster if self._fetch_cluster or cluster is None: try: cluster = self._client.retry(self._inner_load_cluster) except Exception: logger.exception('get_cluster') self.cluster_watcher(None) raise ZooKeeperError('ZooKeeper is not responding properly') return cluster def _create(self, path, value, retry=False, ephemeral=False): try: if retry: self._client.retry(self._client.create, path, value, makepath=True, ephemeral=ephemeral) else: self._client.create_async(path, value, makepath=True, ephemeral=ephemeral).get(timeout=1) return True except Exception: logger.exception('Failed to create %s', path) return False def attempt_to_acquire_leader(self, permanent=False): ret = self._create(self.leader_path, self._name.encode('utf-8'), retry=True, ephemeral=not permanent) if not ret: logger.info('Could not take out TTL lock') return ret def _set_or_create(self, key, value, index=None, retry=False, do_not_create_empty=False): value = value.encode('utf-8') try: if retry: self._client.retry(self._client.set, key, value, version=index or -1) else: self._client.set_async(key, value, version=index or -1).get(timeout=1) return True except NoNodeError: if do_not_create_empty and not value: return True elif index is None: return self._create(key, value, retry) else: return False except Exception: logger.exception('Failed to update %s', key) return False def set_failover_value(self, value, index=None): return self._set_or_create(self.failover_path, value, index) def set_config_value(self, value, index=None): return self._set_or_create(self.config_path, value, index, retry=True) def initialize(self, create_new=True, sysid=""): sysid = sysid.encode('utf-8') return self._create(self.initialize_path, sysid, retry=True) if create_new \ else self._client.retry(self._client.set, self.initialize_path, sysid) def touch_member(self, data, permanent=False): cluster = self.cluster member = cluster and cluster.get_member(self._name, fallback_to_leader=False) encoded_data = json.dumps(data, separators=(',', ':')).encode('utf-8') if member and (self._client.client_id is not None and member.session != self._client.client_id[0] or not (deep_compare(member.data.get('tags', {}), data.get('tags', {})) and member.data.get('version') == data.get('version') and member.data.get('checkpoint_after_promote') == data.get('checkpoint_after_promote'))): try: self._client.delete_async(self.member_path).get(timeout=1) except NoNodeError: pass except Exception: return False member = None if member: if deep_compare(data, member.data): return True else: try: self._client.create_async(self.member_path, encoded_data, makepath=True, ephemeral=not permanent).get(timeout=1) return True except Exception as e: if not isinstance(e, NodeExistsError): logger.exception('touch_member') return False try: self._client.set_async(self.member_path, encoded_data).get(timeout=1) return True except Exception: 
logger.exception('touch_member') return False def take_leader(self): return self.attempt_to_acquire_leader() def _write_leader_optime(self, last_operation): return self._set_or_create(self.leader_optime_path, last_operation) def _update_leader(self): return True def delete_leader(self): self._client.restart() return True def _cancel_initialization(self): node = self.get_node(self.initialize_path) if node: self._client.delete(self.initialize_path, version=node[1].version) def cancel_initialization(self): try: self._client.retry(self._cancel_initialization) except Exception: logger.exception("Unable to delete initialize key") def delete_cluster(self): try: return self._client.retry(self._client.delete, self.client_path(''), recursive=True) except NoNodeError: return True def set_history_value(self, value): return self._set_or_create(self.history_path, value) def set_sync_state_value(self, value, index=None): return self._set_or_create(self.sync_path, value, index, retry=True, do_not_create_empty=True) def delete_sync_state(self, index=None): return self.set_sync_state_value("{}", index) def watch(self, leader_index, timeout): if super(ZooKeeper, self).watch(leader_index, timeout): self._fetch_cluster = True return self._fetch_cluster patroni-1.6.4/patroni/exceptions.py class PatroniException(Exception): """Parent class for all kinds of exceptions related to the selected distributed configuration store""" def __init__(self, value): self.value = value def __str__(self): """ >>> str(PatroniException('foo')) "'foo'" """ return repr(self.value) class PostgresException(PatroniException): pass class DCSError(PatroniException): pass class PostgresConnectionException(PostgresException): pass class WatchdogError(PatroniException): pass class ConfigParseError(PatroniException): pass patroni-1.6.4/patroni/ha.py import datetime import functools import json import logging import psycopg2 import sys import time import uuid from collections import namedtuple from multiprocessing.pool import ThreadPool from patroni.async_executor import AsyncExecutor, CriticalTask from patroni.exceptions import DCSError, PostgresConnectionException, PatroniException from patroni.postgresql import ACTION_ON_START, ACTION_ON_ROLE_CHANGE from patroni.postgresql.misc import postgres_version_to_int from patroni.postgresql.rewind import Rewind from patroni.utils import polling_loop, tzutc, is_standby_cluster as _is_standby_cluster from patroni.dcs import RemoteMember from threading import RLock logger = logging.getLogger(__name__) class _MemberStatus(namedtuple('_MemberStatus', ['member', 'reachable', 'in_recovery', 'timeline', 'wal_position', 'tags', 'watchdog_failed'])): """Node status distilled from API response: member - dcs.Member object of the node reachable - `!False` if the node is not reachable or is not responding with correct JSON in_recovery - `!True` if pg_is_in_recovery() == true timeline - timeline value from JSON wal_position - maximum value of `replayed_location` or `received_location` from JSON tags - dictionary with values of different tags (i.e. 
nofailover) watchdog_failed - indicates that watchdog is required by configuration but not available or failed """ @classmethod def from_api_response(cls, member, json): is_master = json['role'] == 'master' timeline = json.get('timeline', 0) wal = not is_master and max(json['xlog'].get('received_location', 0), json['xlog'].get('replayed_location', 0)) return cls(member, True, not is_master, timeline, wal, json.get('tags', {}), json.get('watchdog_failed', False)) @classmethod def unknown(cls, member): return cls(member, False, None, 0, 0, {}, False) def failover_limitation(self): """Returns the reason why this node can't promote, or None if everything is ok.""" if not self.reachable: return 'not reachable' if self.tags.get('nofailover', False): return 'not allowed to promote' if self.watchdog_failed: return 'not watchdog capable' return None class Ha(object): def __init__(self, patroni): self.patroni = patroni self.state_handler = patroni.postgresql self._rewind = Rewind(self.state_handler) self.dcs = patroni.dcs self.cluster = None self.old_cluster = None self._is_leader = False self._is_leader_lock = RLock() self._leader_access_is_restricted = False self._was_paused = False self._leader_timeline = None self.recovering = False self._post_bootstrap_task = None self._crash_recovery_executed = False self._start_timeout = None self._async_executor = AsyncExecutor(self.state_handler.cancellable, self.wakeup) self.watchdog = patroni.watchdog # Each member publishes various pieces of information to the DCS using touch_member. This lock protects # the state and publishing procedure to have consistent ordering and avoid publishing stale values. self._member_state_lock = RLock() # Count of concurrent sync disabling requests. Value above zero means that we don't want to be a synchronous # standby. Changes are protected by _member_state_lock. self._disable_sync = 0 # We need the following property to avoid shutting down postgres when the join of Patroni to a postgres # already running as a replica was aborted due to the cluster not being initialized in DCS. 
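# Illustrative doctest-style example (not part of Patroni; the member name, URL and
# JSON values below are made up) of how a /patroni REST response is distilled into a
# _MemberStatus above and then screened by failover_limitation():
# >>> from patroni.dcs import Member
# >>> m = Member(-1, 'node2', None, {'api_url': 'http://10.0.0.2:8008/patroni'})
# >>> j = {'role': 'replica', 'timeline': 3,
# ...      'xlog': {'received_location': 100, 'replayed_location': 90},
# ...      'tags': {'nofailover': True}}
# >>> st = _MemberStatus.from_api_response(m, j)
# >>> st.wal_position  # max of received/replayed locations
# 100
# >>> st.failover_limitation()
# 'not allowed to promote'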
self._join_aborted = False def check_mode(self, mode): # Try to protect from the case when DCS was wiped out during pause if self.cluster and self.cluster.config and self.cluster.config.modify_index: return self.cluster.check_mode(mode) else: return self.patroni.config.check_mode(mode) def is_paused(self): return self.check_mode('pause') def check_timeline(self): return self.check_mode('check_timeline') def get_standby_cluster_config(self): if self.cluster and self.cluster.config and self.cluster.config.modify_index: config = self.cluster.config.data else: config = self.patroni.config.dynamic_configuration return config.get('standby_cluster') def is_standby_cluster(self): return _is_standby_cluster(self.get_standby_cluster_config()) def is_leader(self): with self._is_leader_lock: return self._is_leader > time.time() and not self._leader_access_is_restricted def set_is_leader(self, value): with self._is_leader_lock: self._is_leader = time.time() + self.dcs.ttl if value else 0 def set_leader_access_is_restricted(self, value): with self._is_leader_lock: self._leader_access_is_restricted = value def load_cluster_from_dcs(self): cluster = self.dcs.get_cluster() # We want to keep the state of cluster when it was healthy if not cluster.is_unlocked() or not self.old_cluster: self.old_cluster = cluster self.cluster = cluster if not self.has_lock(False): self.set_is_leader(False) self._leader_timeline = None if cluster.is_unlocked() else cluster.leader.timeline def acquire_lock(self): self.set_leader_access_is_restricted(self.cluster.has_permanent_logical_slots(self.state_handler.name)) ret = self.dcs.attempt_to_acquire_leader() self.set_is_leader(ret) return ret def update_lock(self, write_leader_optime=False): last_operation = None if write_leader_optime: try: last_operation = self.state_handler.last_operation() except Exception: logger.exception('Exception when called state_handler.last_operation()') ret = self.dcs.update_leader(last_operation, self._leader_access_is_restricted) self.set_is_leader(ret) if ret: self.watchdog.keepalive() return ret def has_lock(self, info=True): lock_owner = self.cluster.leader and self.cluster.leader.name if info: logger.info('Lock owner: %s; I am %s', lock_owner, self.state_handler.name) return lock_owner == self.state_handler.name def get_effective_tags(self): """Return configuration tags merged with dynamically applied tags.""" tags = self.patroni.tags.copy() # _disable_sync could be modified concurrently, but we don't care as attribute get and set are atomic. 
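# Doctest-style sketch (numbers made up) of the leased-leadership bookkeeping in
# set_is_leader()/is_leader() above: every successful acquire_lock()/update_leader()
# extends our belief in the leader key by one DCS TTL; once the lease expires we must
# stop acting as the leader even before the DCS tells us so.
# >>> import time
# >>> ttl = 30
# >>> is_leader_until = time.time() + ttl  # set_is_leader(True)
# >>> is_leader_until > time.time()        # is_leader() inside the TTL window
# True
# >>> is_leader_until = 0                  # set_is_leader(False) after a failed update
# >>> is_leader_until > time.time()        # stale lease: behave as a non-leader
# False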
if self._disable_sync > 0: tags['nosync'] = True return tags def touch_member(self): with self._member_state_lock: data = { 'conn_url': self.state_handler.connection_string, 'api_url': self.patroni.api.connection_string, 'state': self.state_handler.state, 'role': self.state_handler.role, 'version': self.patroni.version } # following two lines are mainly necessary for consul, to avoid creation of master service if data['role'] == 'master' and not self.is_leader(): data['role'] = 'promoted' if self.is_leader() and not self._rewind.checkpoint_after_promote(): data['checkpoint_after_promote'] = False tags = self.get_effective_tags() if tags: data['tags'] = tags if self.state_handler.pending_restart: data['pending_restart'] = True if self._async_executor.scheduled_action in (None, 'promote') \ and data['state'] in ['running', 'restarting', 'starting']: try: timeline, wal_position, pg_control_timeline = self.state_handler.timeline_wal_position() data['xlog_location'] = wal_position if not timeline: # So far the only way to get the current timeline on the standby is from # the replication connection. In order to avoid opening the replication # connection on every iteration of HA loop we will do it only when noticed # that the timeline on the primary has changed. # Unfortunately such optimization isn't possible on the standby_leader, # therefore we will get the timeline from pg_control, either by calling # pg_control_checkpoint() on 9.6+ or by parsing the output of pg_controldata. if self.state_handler.role == 'standby_leader': timeline = pg_control_timeline or self.state_handler.pg_control_timeline() else: timeline = self.state_handler.replica_cached_timeline(timeline) if timeline: data['timeline'] = timeline except Exception: pass if self.patroni.scheduled_restart: scheduled_restart_data = self.patroni.scheduled_restart.copy() scheduled_restart_data['schedule'] = scheduled_restart_data['schedule'].isoformat() data['scheduled_restart'] = scheduled_restart_data if self.is_paused(): data['pause'] = True return self.dcs.touch_member(data) def clone(self, clone_member=None, msg='(without leader)'): if self.is_standby_cluster() and not isinstance(clone_member, RemoteMember): clone_member = self.get_remote_member(clone_member) self._rewind.reset_state() if self.state_handler.bootstrap.clone(clone_member): logger.info('bootstrapped %s', msg) cluster = self.dcs.get_cluster() node_to_follow = self._get_node_to_follow(cluster) return self.state_handler.follow(node_to_follow) else: logger.error('failed to bootstrap %s', msg) self.state_handler.remove_data_directory() def bootstrap(self): if not self.cluster.is_unlocked(): # cluster already has leader clone_member = self.cluster.get_clone_member(self.state_handler.name) member_role = 'leader' if clone_member == self.cluster.leader else 'replica' msg = "from {0} '{1}'".format(member_role, clone_member.name) ret = self._async_executor.try_run_async('bootstrap {0}'.format(msg), self.clone, args=(clone_member, msg)) return ret or 'trying to bootstrap {0}'.format(msg) # no initialize key and node is allowed to be master and has 'bootstrap' section in a configuration file elif self.cluster.initialize is None and not self.patroni.nofailover and 'bootstrap' in self.patroni.config: if self.dcs.initialize(create_new=True): # race for initialization self.state_handler.bootstrapping = True self._post_bootstrap_task = CriticalTask() if self.is_standby_cluster(): ret = self._async_executor.try_run_async('bootstrap_standby_leader', self.bootstrap_standby_leader) return ret 
or 'trying to bootstrap a new standby leader' else: ret = self._async_executor.try_run_async('bootstrap', self.state_handler.bootstrap.bootstrap, args=(self.patroni.config['bootstrap'],)) return ret or 'trying to bootstrap a new cluster' else: return 'failed to acquire initialize lock' else: create_replica_methods = self.get_standby_cluster_config().get('create_replica_methods', []) \ if self.is_standby_cluster() else None if self.state_handler.can_create_replica_without_replication_connection(create_replica_methods): msg = 'bootstrap (without leader)' return self._async_executor.try_run_async(msg, self.clone) or 'trying to ' + msg return 'waiting for {0}leader to bootstrap'.format('standby_' if self.is_standby_cluster() else '') def bootstrap_standby_leader(self): """ If we find the 'standby' key in the configuration, we need to bootstrap not a real master, but a 'standby leader' that will take a base backup from a remote master and start following it. """ clone_source = self.get_remote_master() msg = 'clone from remote master {0}'.format(clone_source.conn_url) result = self.clone(clone_source, msg) self._post_bootstrap_task.complete(result) if result: self.state_handler.set_role('standby_leader') return result def _handle_rewind_or_reinitialize(self): leader = self.get_remote_master() if self.is_standby_cluster() else self.cluster.leader if not self._rewind.rewind_or_reinitialize_needed_and_possible(leader): return None if self._rewind.can_rewind: msg = 'running pg_rewind from ' + leader.name return self._async_executor.try_run_async(msg, self._rewind.execute, args=(leader,)) or msg # remove_data_directory_on_diverged_timelines is set if not self.is_standby_cluster(): msg = 'reinitializing due to diverged timelines' return self._async_executor.try_run_async(msg, self._do_reinitialize, args=(self.cluster,)) or msg def recover(self): # Postgres is not running and we will restart in standby mode. Watchdog is not needed until we promote. self.watchdog.disable() if self.has_lock() and self.update_lock(): timeout = self.patroni.config['master_start_timeout'] if timeout == 0: # We are requested to prefer failing over to restarting master. But first see if there # is anyone to fail over to. members = self.cluster.members if self.is_synchronous_mode(): members = [m for m in members if self.cluster.sync.matches(m.name)] if self.is_failover_possible(members): logger.info("Master crashed. 
Failing over.") self.demote('immediate') return 'stopped PostgreSQL to fail over after a crash' else: timeout = None data = self.state_handler.controldata() logger.info('pg_controldata:\n%s\n', '\n'.join(' {0}: {1}'.format(k, v) for k, v in data.items())) if data.get('Database cluster state') in ('in production', 'shutting down', 'in crash recovery') \ and not self._crash_recovery_executed and \ (self.cluster.is_unlocked() or self._rewind.can_rewind): self._crash_recovery_executed = True msg = 'doing crash recovery in a single user mode' return self._async_executor.try_run_async(msg, self.state_handler.fix_cluster_state) or msg self.load_cluster_from_dcs() role = 'replica' if self.is_standby_cluster() or not self.has_lock(): if not self._rewind.executed: self._rewind.trigger_check_diverged_lsn() msg = self._handle_rewind_or_reinitialize() if msg: return msg if self.has_lock(): # in standby cluster msg = "starting as a standby leader because i had the session lock" role = 'standby_leader' node_to_follow = self._get_node_to_follow(self.cluster) elif self.is_standby_cluster() and self.cluster.is_unlocked(): msg = "trying to follow a remote master because standby cluster is unhealthy" node_to_follow = self.get_remote_master() else: msg = "starting as a secondary" node_to_follow = self._get_node_to_follow(self.cluster) elif self.has_lock(): msg = "starting as readonly because i had the session lock" node_to_follow = None if self._async_executor.try_run_async('restarting after failure', self.state_handler.follow, args=(node_to_follow, role, timeout)) is None: self.recovering = True return msg def _get_node_to_follow(self, cluster): # determine the node to follow. If replicatefrom tag is set, # try to follow the node mentioned there, otherwise, follow the leader. standby_config = self.get_standby_cluster_config() is_standby_cluster = _is_standby_cluster(standby_config) if is_standby_cluster and (self.cluster.is_unlocked() or self.has_lock(False)): node_to_follow = self.get_remote_master() elif self.patroni.replicatefrom and self.patroni.replicatefrom != self.state_handler.name: node_to_follow = cluster.get_member(self.patroni.replicatefrom) else: node_to_follow = cluster.leader node_to_follow = node_to_follow if node_to_follow and node_to_follow.name != self.state_handler.name else None if node_to_follow and not isinstance(node_to_follow, RemoteMember): # we are going to abuse Member.data to pass following parameters params = ('restore_command', 'archive_cleanup_command') for param in params: # It is highly unlikely to happen, but we want to protect from the case node_to_follow.data.pop(param, None) # when above-mentioned params came from outside. 
if is_standby_cluster: node_to_follow.data.update({p: standby_config[p] for p in params if standby_config.get(p)}) return node_to_follow def follow(self, demote_reason, follow_reason, refresh=True): if refresh: self.load_cluster_from_dcs() is_leader = self.state_handler.is_leader() node_to_follow = self._get_node_to_follow(self.cluster) if self.is_paused(): if not (self._rewind.is_needed and self._rewind.can_rewind_or_reinitialize_allowed)\ or self.cluster.is_unlocked(): self.state_handler.set_role('master' if is_leader else 'replica') if is_leader: return 'continue to run as master without lock' elif not node_to_follow: return 'no action' elif is_leader: self.demote('immediate-nolock') return demote_reason msg = self._handle_rewind_or_reinitialize() if msg: return msg role = 'standby_leader' if isinstance(node_to_follow, RemoteMember) and self.has_lock(False) else 'replica' # It might happen that the leader key in the standby cluster references a non-existing member. # In this case it is safe to continue running without changing recovery.conf if self.is_standby_cluster() and role == 'replica' and not (node_to_follow and node_to_follow.conn_url): return 'continue following the old known standby leader' else: change_required, restart_required = self.state_handler.config.check_recovery_conf(node_to_follow) if change_required: if restart_required: self._async_executor.try_run_async('changing primary_conninfo and restarting', self.state_handler.follow, args=(node_to_follow, role)) else: self.state_handler.follow(node_to_follow, role, do_reload=True) elif role == 'standby_leader' and self.state_handler.role != role: self.state_handler.set_role(role) self.state_handler.call_nowait(ACTION_ON_ROLE_CHANGE) return follow_reason def is_synchronous_mode(self): return self.check_mode('synchronous_mode') def is_synchronous_mode_strict(self): return self.check_mode('synchronous_mode_strict') def process_sync_replication(self): """Process synchronous standby behavior. Synchronous standbys are registered in two places: postgresql.conf and DCS. The order of updating them must be right. The invariant that should be kept is that if a node is master and sync_standby is set in DCS, then that node must have synchronous_standby set to that value. Or more simply: first set in postgresql.conf and then in DCS. When removing, first remove in DCS, then in postgresql.conf. This is so we only consider promoting standbys that were guaranteed to be replicating synchronously. 
""" if self.is_synchronous_mode(): current = self.cluster.sync.leader and self.cluster.sync.sync_standby picked, allow_promote = self.state_handler.pick_synchronous_standby(self.cluster) if picked != current: # We need to revoke privilege from current before replacing it in the config if current: logger.info("Removing synchronous privilege from %s", current) if not self.dcs.write_sync_state(self.state_handler.name, None, index=self.cluster.sync.index): logger.info('Synchronous replication key updated by someone else.') return if self.is_synchronous_mode_strict() and picked is None: picked = '*' logger.warning("No standbys available!") logger.info("Assigning synchronous standby status to %s", picked) self.state_handler.config.set_synchronous_standby(picked) if picked and picked != '*' and not allow_promote: # Wait for PostgreSQL to enable synchronous mode and see if we can immediately set sync_standby time.sleep(2) picked, allow_promote = self.state_handler.pick_synchronous_standby(self.cluster) if allow_promote: try: cluster = self.dcs.get_cluster() except DCSError: return logger.warning("Could not get cluster state from DCS during process_sync_replication()") if cluster.sync.leader and cluster.sync.leader != self.state_handler.name: logger.info("Synchronous replication key updated by someone else") return if not self.dcs.write_sync_state(self.state_handler.name, picked, index=cluster.sync.index): logger.info("Synchronous replication key updated by someone else") return logger.info("Synchronous standby status assigned to %s", picked) else: if self.cluster.sync.leader and self.dcs.delete_sync_state(index=self.cluster.sync.index): logger.info("Disabled synchronous replication") self.state_handler.config.set_synchronous_standby(None) def is_sync_standby(self, cluster): return cluster.leader and cluster.sync.leader == cluster.leader.name \ and cluster.sync.sync_standby == self.state_handler.name def while_not_sync_standby(self, func): """Runs specified action while trying to make sure that the node is not assigned synchronous standby status. Tags us as not allowed to be a sync standby as we are going to go away, if we currently are wait for leader to notice and pick an alternative one or if the leader changes or goes away we are also free. If the connection to DCS fails we run the action anyway, as this is only a hint. There is a small race window where this function runs between a master picking us the sync standby and publishing it to the DCS. As the window is rather tiny consequences are holding up commits for one cycle period we don't worry about it here.""" if not self.is_synchronous_mode() or self.patroni.nosync: return func() with self._member_state_lock: self._disable_sync += 1 try: if self.touch_member(): # Master should notice the updated value during the next cycle. We will wait double that, if master # hasn't noticed the value by then not disabling sync replication is not likely to matter. 
for _ in polling_loop(timeout=self.dcs.loop_wait*2, interval=2): try: if not self.is_sync_standby(self.dcs.get_cluster()): break except DCSError: logger.warning("Could not get cluster state, skipping synchronous standby disable") break logger.info("Waiting for master to release us from synchronous standby") else: logger.warning("Updating member state failed, skipping synchronous standby disable") return func() finally: with self._member_state_lock: self._disable_sync -= 1 def update_cluster_history(self): master_timeline = self.state_handler.get_master_timeline() cluster_history = self.cluster.history and self.cluster.history.lines if master_timeline == 1: if cluster_history: self.dcs.set_history_value('[]') elif not cluster_history or cluster_history[-1][0] != master_timeline - 1 or len(cluster_history[-1]) != 4: cluster_history = {l[0]: l for l in cluster_history or []} history = self.state_handler.get_history(master_timeline) if history: for line in history: # enrich current history with promotion timestamps stored in DCS if len(line) == 3 and line[0] in cluster_history \ and len(cluster_history[line[0]]) == 4 \ and cluster_history[line[0]][1] == line[1]: line.append(cluster_history[line[0]][3]) self.dcs.set_history_value(json.dumps(history, separators=(',', ':'))) def enforce_follow_remote_master(self, message): demote_reason = 'cannot be a real master in standby cluster' return self.follow(demote_reason, message) def enforce_master_role(self, message, promote_message): if not self.is_paused() and not self.watchdog.is_running and not self.watchdog.activate(): if self.state_handler.is_leader(): self.demote('immediate') return 'Demoting self because watchdog could not be activated' else: self.release_leader_key_voluntarily() return 'Not promoting self because watchdog could not be activated' if self.state_handler.is_leader(): # Inform the state handler about its master role. # It may be unaware of it if postgres is promoted manually. self.state_handler.set_role('master') self.process_sync_replication() self.update_cluster_history() return message elif self.state_handler.role == 'master': self.process_sync_replication() return message else: if self.is_synchronous_mode(): # Just set ourselves as the authoritative source of truth for now. We don't want to wait for standbys # to connect. We will try finding a synchronous standby in the next cycle. if not self.dcs.write_sync_state(self.state_handler.name, None, index=self.cluster.sync.index): # Somebody else updated sync state, it may be due to us losing the lock. To be safe, postpone # promotion until next cycle. 
TODO: trigger immediate retry of run_cycle return 'Postponing promotion because synchronous replication state was updated by somebody else' self.state_handler.config.set_synchronous_standby('*' if self.is_synchronous_mode_strict() else None) if self.state_handler.role != 'master': self.set_leader_access_is_restricted(self.cluster.has_permanent_logical_slots(self.state_handler.name)) def on_success(): self._rewind.reset_state() logger.info("cleared rewind state after becoming the leader") self._async_executor.try_run_async('promote', self.state_handler.promote, args=(self.dcs.loop_wait, on_success, self._leader_access_is_restricted)) return promote_message def fetch_node_status(self, member): """This function performs an HTTP GET request on member.api_url and fetches its status :returns: `_MemberStatus` object """ try: response = self.patroni.request(member, timeout=2, retries=0) data = response.data.decode('utf-8') logger.info('Got response from %s %s: %s', member.name, member.api_url, data) return _MemberStatus.from_api_response(member, json.loads(data)) except Exception as e: logger.warning("Request failed to %s: GET %s (%s)", member.name, member.api_url, e) return _MemberStatus.unknown(member) def fetch_nodes_statuses(self, members): pool = ThreadPool(len(members)) results = pool.map(self.fetch_node_status, members) # Run API calls on members in parallel pool.close() pool.join() return results def is_lagging(self, wal_position): """Returns whether an instance with the given wal position should consider itself unhealthy to be promoted due to replication lag. :param wal_position: Current wal position. :returns: True when the node is lagging """ lag = (self.cluster.last_leader_operation or 0) - wal_position return lag > self.patroni.config.get('maximum_lag_on_failover', 0) def _is_healthiest_node(self, members, check_replication_lag=True): """This method tries to determine whether I am healthy enough to become a new leader candidate or not.""" # We don't call `last_operation()` here because it returns a string _, my_wal_position, _ = self.state_handler.timeline_wal_position() if check_replication_lag and self.is_lagging(my_wal_position): logger.info('My wal position exceeds maximum replication lag') return False # Too far behind last reported wal position on master if not self.is_standby_cluster() and self.check_timeline(): cluster_timeline = self.cluster.timeline my_timeline = self.state_handler.replica_cached_timeline(cluster_timeline) if my_timeline < cluster_timeline: logger.info('My timeline %s is behind last known cluster timeline %s', my_timeline, cluster_timeline) return False # Prepare list of nodes to run check against members = [m for m in members if m.name != self.state_handler.name and not m.nofailover and m.api_url] if members: for st in self.fetch_nodes_statuses(members): if st.failover_limitation() is None: if not st.in_recovery: logger.warning('Master (%s) is still alive', st.member.name) return False if my_wal_position < st.wal_position: logger.info('Wal position of %s is ahead of my wal position', st.member.name) # In synchronous mode the former leader might still be accessible and even be ahead of us. # We should not disqualify ourselves from the leader race in such a situation. 
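# Worked example (made-up numbers) of the lag check in is_lagging() above: the last
# leader optime stored in DCS minus our own wal position must not exceed
# maximum_lag_on_failover for this node to stay eligible for promotion.
# >>> last_leader_operation = 67108864   # leader optime from DCS, in bytes
# >>> my_wal_position = 66060288         # our replayed/received location
# >>> maximum_lag_on_failover = 1048576  # 1 MiB, configuration value
# >>> lag = last_leader_operation - my_wal_position
# >>> lag
# 1048576
# >>> lag > maximum_lag_on_failover      # exactly at the limit is still healthy
# False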
if not self.is_synchronous_mode() or st.member.name != self.cluster.sync.leader: return False logger.info('Ignoring the former leader being ahead of us') return True def is_failover_possible(self, members): ret = False cluster_timeline = self.cluster.timeline members = [m for m in members if m.name != self.state_handler.name and not m.nofailover and m.api_url] if members: for st in self.fetch_nodes_statuses(members): not_allowed_reason = st.failover_limitation() if not_allowed_reason: logger.info('Member %s is %s', st.member.name, not_allowed_reason) elif self.is_lagging(st.wal_position): logger.info('Member %s exceeds maximum replication lag', st.member.name) elif self.check_timeline() and (not st.timeline or st.timeline < cluster_timeline): logger.info('Timeline %s of member %s is behind the cluster timeline %s', st.timeline, st.member.name, cluster_timeline) else: ret = True else: logger.warning('manual failover: members list is empty') return ret def manual_failover_process_no_leader(self): failover = self.cluster.failover if failover.candidate: # manual failover to specific member if failover.candidate == self.state_handler.name: # manual failover to me return True elif self.is_paused(): # Remove failover key if the node to failover has terminated to avoid waiting for it indefinitely # In order to avoid attempts to delete this key from all nodes only the master is allowed to do it. if (not self.cluster.get_member(failover.candidate, fallback_to_leader=False) and self.state_handler.is_leader()): logger.warning("manual failover: removing failover key because failover candidate is not running") self.dcs.manual_failover('', '', index=self.cluster.failover.index) return None return False # find specific node and check that it is healthy member = self.cluster.get_member(failover.candidate, fallback_to_leader=False) if member: st = self.fetch_node_status(member) not_allowed_reason = st.failover_limitation() if not_allowed_reason is None: # node is healthy logger.info('manual failover: to %s, i am %s', st.member.name, self.state_handler.name) return False # we wanted to failover to specific member but it is not healthy logger.warning('manual failover: member %s is %s', st.member.name, not_allowed_reason) # at this point we should consider all members as a candidates for failover # i.e. 
we assume that failover.candidate is None elif self.is_paused(): return False # try to pick some other members to failover and check that they are healthy if failover.leader: if self.state_handler.name == failover.leader: # I was the leader # exclude me and desired member which is unhealthy (failover.candidate can be None) members = [m for m in self.cluster.members if m.name not in (failover.candidate, failover.leader)] if self.is_failover_possible(members): # check that there are healthy members return False else: # I was the leader and it looks like currently I am the only healthy member return True # at this point we assume that our node is a candidate for a failover among all nodes except former leader # exclude former leader from the list (failover.leader can be None) members = [m for m in self.cluster.members if m.name != failover.leader] return self._is_healthiest_node(members, check_replication_lag=False) def is_healthiest_node(self): if self.is_paused() and not self.patroni.nofailover and \ self.cluster.failover and not self.cluster.failover.scheduled_at: ret = self.manual_failover_process_no_leader() if ret is not None: # continue if we just deleted the stale failover key as a master return ret if self.state_handler.is_starting(): # postgresql still starting up is unhealthy return False if self.state_handler.is_leader(): # leader is always the healthiest return True if self.is_paused(): return False if self.patroni.nofailover: # nofailover tag makes node always unhealthy return False if self.cluster.failover: return self.manual_failover_process_no_leader() if not self.watchdog.is_healthy: return False # When in sync mode, only last known master and sync standby are allowed to promote automatically. all_known_members = self.cluster.members + self.old_cluster.members if self.is_synchronous_mode() and self.cluster.sync.leader: if not self.cluster.sync.matches(self.state_handler.name): return False # pick between synchronous candidates so we minimize unnecessary failovers/demotions members = {m.name: m for m in all_known_members if self.cluster.sync.matches(m.name)} else: # run usual health check members = {m.name: m for m in all_known_members} return self._is_healthiest_node(members.values()) def _delete_leader(self): self.set_is_leader(False) self.dcs.delete_leader() self.dcs.reset_cluster() def release_leader_key_voluntarily(self): self._delete_leader() self.touch_member() logger.info("Leader key released") def demote(self, mode): """Demote PostgreSQL running as master. :param mode: One of offline, graceful, immediate or immediate-nolock. offline is used when connection to DCS is not available. graceful is used when failing over to another node due to user request. May only be called running async. immediate is used when we determine that we are not suitable for master and want to failover quickly without regard for data durability. May only be called synchronously. immediate-nolock is used when we find out that we have lost the lock to be master. Need to bring down PostgreSQL as quickly as possible without regard for data durability. May only be called synchronously. 
""" mode_control = { 'offline': dict(stop='fast', checkpoint=False, release=False, offline=True, async_req=False), 'graceful': dict(stop='fast', checkpoint=True, release=True, offline=False, async_req=False), 'immediate': dict(stop='immediate', checkpoint=False, release=True, offline=False, async_req=True), 'immediate-nolock': dict(stop='immediate', checkpoint=False, release=False, offline=False, async_req=True), }[mode] self._rewind.trigger_check_diverged_lsn() self.state_handler.stop(mode_control['stop'], checkpoint=mode_control['checkpoint'], on_safepoint=self.watchdog.disable if self.watchdog.is_running else None) self.state_handler.set_role('demoted') self.set_is_leader(False) if mode_control['release']: with self._async_executor: self.release_leader_key_voluntarily() time.sleep(2) # Give a time to somebody to take the leader lock if mode_control['offline']: node_to_follow, leader = None, None else: cluster = self.dcs.get_cluster() node_to_follow, leader = self._get_node_to_follow(cluster), cluster.leader # FIXME: with mode offline called from DCS exception handler and handle_long_action_in_progress # there could be an async action already running, calling follow from here will lead # to racy state handler state updates. if mode_control['async_req']: self._async_executor.try_run_async('starting after demotion', self.state_handler.follow, (node_to_follow,)) else: if self.is_synchronous_mode(): self.state_handler.config.set_synchronous_standby(None) if self._rewind.rewind_or_reinitialize_needed_and_possible(leader): return False # do not start postgres, but run pg_rewind on the next iteration self.state_handler.follow(node_to_follow) def should_run_scheduled_action(self, action_name, scheduled_at, cleanup_fn): if scheduled_at and not self.is_paused(): # If the scheduled action is in the far future, we shouldn't do anything and just return. # If the scheduled action is in the past, we consider the value to be stale and we remove # the value. # If the value is close to now, we initiate the scheduled action # Additionally, if the scheduled action cannot be executed altogether, i.e. there is an error # or the action is in the past - we take care of cleaning it up. now = datetime.datetime.now(tzutc) try: delta = (scheduled_at - now).total_seconds() if delta > self.dcs.loop_wait: logger.info('Awaiting %s at %s (in %.0f seconds)', action_name, scheduled_at.isoformat(), delta) return False elif delta < - int(self.dcs.loop_wait * 1.5): # This means that if run_cycle gets delayed for 2.5x loop_wait we skip the # scheduled action. Probably not a problem, if things are that bad we don't # want to be restarting or failing over anyway. logger.warning('Found a stale %s value, cleaning up: %s', action_name, scheduled_at.isoformat()) cleanup_fn() return False # The value is very close to now time.sleep(max(delta, 0)) logger.info('Manual scheduled {0} at %s'.format(action_name), scheduled_at.isoformat()) return True except TypeError: logger.warning('Incorrect value of scheduled_at: %s', scheduled_at) cleanup_fn() return False def process_manual_failover_from_leader(self): """Checks if manual failover is requested and takes action if appropriate. Cleans up failover key if failover conditions are not matched. 
:returns: action message if demote was initiated, None if no action was taken""" failover = self.cluster.failover if not failover or (self.is_paused() and not self.state_handler.is_leader()): return if (failover.scheduled_at and not self.should_run_scheduled_action("failover", failover.scheduled_at, lambda: self.dcs.manual_failover('', '', index=failover.index))): return if not failover.leader or failover.leader == self.state_handler.name: if not failover.candidate or failover.candidate != self.state_handler.name: if not failover.candidate and self.is_paused(): logger.warning('Failover is possible only to a specific candidate in a paused state') else: if self.is_synchronous_mode(): if failover.candidate and not self.cluster.sync.matches(failover.candidate): logger.warning('Failover candidate=%s does not match with sync_standby=%s', failover.candidate, self.cluster.sync.sync_standby) members = [] else: members = [m for m in self.cluster.members if self.cluster.sync.matches(m.name)] else: members = [m for m in self.cluster.members if not failover.candidate or m.name == failover.candidate] if self.is_failover_possible(members): # check that there are healthy members ret = self._async_executor.try_run_async('manual failover: demote', self.demote, ('graceful',)) return ret or 'manual failover: demoting myself' else: logger.warning('manual failover: no healthy members found, failover is not possible') else: logger.warning('manual failover: I am already the leader, no need to failover') else: logger.warning('manual failover: leader name does not match: %s != %s', failover.leader, self.state_handler.name) logger.info('Cleaning up failover key') self.dcs.manual_failover('', '', index=failover.index) def process_unhealthy_cluster(self): """Cluster has no leader key""" if self.is_healthiest_node(): if self.acquire_lock(): failover = self.cluster.failover if failover: if self.is_paused() and failover.leader and failover.candidate: logger.info('Updating failover key after acquiring leader lock...') self.dcs.manual_failover('', failover.candidate, failover.scheduled_at, failover.index) else: logger.info('Cleaning up failover key after acquiring leader lock...') self.dcs.manual_failover('', '') self.load_cluster_from_dcs() if self.is_standby_cluster(): # standby leader disappeared, and this is the healthiest # replica, so it should become a new standby leader. # This implies that we need to start following a remote master msg = 'promoted self to a standby leader by acquiring session lock' return self.enforce_follow_remote_master(msg) else: return self.enforce_master_role( 'acquired session lock as a leader', 'promoted self to leader by acquiring session lock' ) else: return self.follow('demoted self after trying and failing to obtain lock', 'following new leader after trying and failing to obtain lock') else: # when we are doing manual failover there is no guarantee that the new leader is ahead of any other node # a node tagged as nofailover can be ahead of the new leader as well, but it is always excluded from elections if bool(self.cluster.failover) or self.patroni.nofailover: self._rewind.trigger_check_diverged_lsn() time.sleep(2) # Give somebody time to take the leader lock if self.patroni.nofailover: return self.follow('demoting self because I am not allowed to become master', 'following a different leader because I am not allowed to promote') return self.follow('demoting self because i am not the healthiest node', 'following a different leader because i am not the healthiest node') def process_healthy_cluster(self): if self.has_lock(): if self.is_paused() and not self.state_handler.is_leader(): if self.cluster.failover and self.cluster.failover.candidate == self.state_handler.name: return 'waiting to become master after promote...' self._delete_leader() return 'removed leader lock because postgres is not running as master' if self.state_handler.is_leader() and self._leader_access_is_restricted: self.state_handler.slots_handler.sync_replication_slots(self.cluster) self.state_handler.call_nowait(ACTION_ON_ROLE_CHANGE) self.set_leader_access_is_restricted(False) if self.update_lock(True): msg = self.process_manual_failover_from_leader() if msg is not None: return msg # check if the node is ready to be used by pg_rewind self._rewind.check_for_checkpoint_after_promote() if self.is_standby_cluster(): # in case of standby cluster we don't really need to # enforce anything, since the leader is not a master. # So just remind the role. msg = 'no action. i am the standby leader with the lock' \ if self.state_handler.role == 'standby_leader' else \ 'promoted self to a standby leader because i had the session lock' return self.enforce_follow_remote_master(msg) else: return self.enforce_master_role( 'no action. i am the leader with the lock', 'promoted self to leader because i had the session lock' ) else: # Either there is no connection to DCS or someone else acquired the lock logger.error('failed to update leader lock') if self.state_handler.is_leader(): if self.is_paused(): return 'continue to run as master after failing to update leader lock in DCS' self.demote('immediate-nolock') return 'demoted self because failed to update leader lock in DCS' else: return 'not promoting because failed to update leader lock in DCS' else: logger.info('does not have lock') if self.is_standby_cluster(): return self.follow('cannot be a real master in standby cluster', 'no action. i am a secondary and i am following a standby leader', refresh=False) return self.follow('demoting self because i do not have the lock and i was a leader', 'no action. 
i am a secondary and i am following a leader', refresh=False) def evaluate_scheduled_restart(self): if self._async_executor.busy: # Restart already in progress return None # restart if we need to restart_data = self.future_restart_scheduled() if restart_data: recent_time = self.state_handler.postmaster_start_time() request_time = restart_data['postmaster_start_time'] # check if postmaster start time has changed since the last restart if recent_time and request_time and recent_time != request_time: logger.info("Cancelling scheduled restart: postgres restart has already happened at %s", recent_time) self.delete_future_restart() return None if (restart_data and self.should_run_scheduled_action('restart', restart_data['schedule'], self.delete_future_restart)): try: ret, message = self.restart(restart_data, run_async=True) if not ret: logger.warning("Scheduled restart: %s", message) return None return message finally: self.delete_future_restart() def restart_matches(self, role, postgres_version, pending_restart): reason_to_cancel = "" # checking the restart filters here seems to be less ugly than moving them into the # run_scheduled_action. if role and role != self.state_handler.role: reason_to_cancel = "host role mismatch" if postgres_version and postgres_version_to_int(postgres_version) <= int(self.state_handler.server_version): reason_to_cancel = "postgres version mismatch" if pending_restart and not self.state_handler.pending_restart: reason_to_cancel = "pending restart flag is not set" if not reason_to_cancel: return True else: logger.info("not proceeding with the restart: %s", reason_to_cancel) return False def schedule_future_restart(self, restart_data): with self._async_executor: restart_data['postmaster_start_time'] = self.state_handler.postmaster_start_time() if not self.patroni.scheduled_restart: self.patroni.scheduled_restart = restart_data self.touch_member() return True return False def delete_future_restart(self): ret = False with self._async_executor: if self.patroni.scheduled_restart: self.patroni.scheduled_restart = {} self.touch_member() ret = True return ret def future_restart_scheduled(self): return self.patroni.scheduled_restart.copy() if (self.patroni.scheduled_restart and isinstance(self.patroni.scheduled_restart, dict)) else None def restart_scheduled(self): return self._async_executor.scheduled_action == 'restart' def restart(self, restart_data, run_async=False): """ conditional and unconditional restart """ assert isinstance(restart_data, dict) if (not self.restart_matches(restart_data.get('role'), restart_data.get('postgres_version'), ('restart_pending' in restart_data))): return (False, "restart conditions are not satisfied") with self._async_executor: prev = self._async_executor.schedule('restart') if prev is not None: return (False, prev + ' already in progress') # Make the main loop think that we were recovering dead postgres. If we fail # to start postgres after a specified timeout (see below), we need to remove # the leader key (if it belongs to us) rather than trying to start postgres once again. self.recovering = True # Now that restart is scheduled we can set timeout for startup, it will get reset # once async executor runs and main loop notices PostgreSQL as up. timeout = restart_data.get('timeout', self.patroni.config['master_start_timeout']) self.set_start_timeout(timeout) # For non-async cases we want to wait for restart to complete or timeout before returning.
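# restart_matches() above compares a human-readable version like '9.6.5' or
# '12.1' against the integer server_version (e.g. 90605 or 120001). A rough
# sketch of such a conversion follows, assuming the two-component numbering
# used since PostgreSQL 10; the real helper lives in
# patroni.postgresql.misc and may differ in details.
def version_str_to_int(version):
    parts = [int(p) for p in version.split('.')]
    if parts[0] >= 10:       # e.g. '12.1' -> 120001
        return parts[0] * 10000 + (parts[1] if len(parts) > 1 else 0)
    # e.g. '9.6.5' -> 90605
    return parts[0] * 10000 + parts[1] * 100 + (parts[2] if len(parts) > 2 else 0)

assert version_str_to_int('9.6.5') == 90605
assert version_str_to_int('12.1') == 120001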
do_restart = functools.partial(self.state_handler.restart, timeout, self._async_executor.critical_task) if self.is_synchronous_mode() and not self.has_lock(): do_restart = functools.partial(self.while_not_sync_standby, do_restart) if run_async: self._async_executor.run_async(do_restart) return (True, 'restart initiated') else: res = self._async_executor.run(do_restart) if res: return (True, 'restarted successfully') elif res is None: return (False, 'postgres is still starting') else: return (False, 'restart failed') def _do_reinitialize(self, cluster): self.state_handler.stop('immediate') # Commented redundant data directory cleanup here # self.state_handler.remove_data_directory() clone_member = self.cluster.get_clone_member(self.state_handler.name) member_role = 'leader' if clone_member == self.cluster.leader else 'replica' return self.clone(clone_member, "from {0} '{1}'".format(member_role, clone_member.name)) def reinitialize(self, force=False): with self._async_executor: self.load_cluster_from_dcs() if self.cluster.is_unlocked(): return 'Cluster has no leader, can not reinitialize' if self.has_lock(False): return 'I am the leader, can not reinitialize' if force: self._async_executor.cancel() with self._async_executor: action = self._async_executor.schedule('reinitialize') if action is not None: return '{0} already in progress'.format(action) self._async_executor.run_async(self._do_reinitialize, args=(self.cluster, )) def handle_long_action_in_progress(self): if self.has_lock() and self.update_lock(): return 'updated leader lock during ' + self._async_executor.scheduled_action elif not self.state_handler.bootstrapping: # Don't have lock, make sure we are not starting up a master in the background if self.state_handler.role == 'master': logger.info("Demoting master during " + self._async_executor.scheduled_action) if self._async_executor.scheduled_action == 'restart': # Restart needs a special interlocking cancel because postmaster may be just started in a # background thread and has not even written a pid file yet. with self._async_executor.critical_task as task: if not task.cancel(): self.state_handler.terminate_starting_postmaster(postmaster=task.result) self.demote('immediate-nolock') return 'lost leader lock during ' + self._async_executor.scheduled_action if self.cluster.is_unlocked(): logger.info('not healthy enough for leader race') return self._async_executor.scheduled_action + ' in progress' @staticmethod def sysid_valid(sysid): # sysid does tv_sec << 32, where tv_sec is the number of seconds since 1970, # so even 1 << 32 would have 10 digits.
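# A quick standalone check of the claim above: with tv_sec shifted left by 32
# bits, even the smallest plausible value already has 10 decimal digits, and
# current epochs give around 19.
import time

assert len(str(1 << 32)) == 10                # '4294967296'
print(len(str(int(time.time()) << 32)))      # roughly 19 digits today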
sysid = str(sysid) return len(sysid) >= 10 and sysid.isdigit() def post_recover(self): if not self.state_handler.is_running(): self.watchdog.disable() if self.has_lock(): self.state_handler.set_role('demoted') self._delete_leader() return 'removed leader key after trying and failing to start postgres' return 'failed to start postgres' self._crash_recovery_executed = False return None def cancel_initialization(self): logger.info('removing initialize key after failed attempt to bootstrap the cluster') self.dcs.cancel_initialization() self.state_handler.stop('immediate') self.state_handler.move_data_directory() raise PatroniException('Failed to bootstrap cluster') def post_bootstrap(self): # bootstrap has failed if postgres is not running if not self.state_handler.is_running() or self._post_bootstrap_task.result is False: self.cancel_initialization() if self._post_bootstrap_task.result is None: if not self.state_handler.is_leader(): return 'waiting for end of recovery after bootstrap' self.state_handler.set_role('master') ret = self._async_executor.try_run_async('post_bootstrap', self.state_handler.bootstrap.post_bootstrap, args=(self.patroni.config['bootstrap'], self._post_bootstrap_task)) return ret or 'running post_bootstrap' self.state_handler.bootstrapping = False self.dcs.set_config_value(json.dumps(self.patroni.config.dynamic_configuration, separators=(',', ':'))) if not self.watchdog.activate(): logger.error('Cancelling bootstrap because watchdog activation failed') self.cancel_initialization() self.state_handler.slots_handler.sync_replication_slots(self.cluster) self.dcs.take_leader() self.set_is_leader(True) self.state_handler.call_nowait(ACTION_ON_START) self.load_cluster_from_dcs() return 'initialized a new cluster' def handle_starting_instance(self): """Starting up PostgreSQL may take a long time. In case we are the leader we may want to fail over to a replica.""" # Check if we are in startup; when paused, defer to the main loop for manual failovers. if not self.state_handler.check_for_startup() or self.is_paused(): self.set_start_timeout(None) if self.is_paused(): self.state_handler.set_state(self.state_handler.is_running() and 'running' or 'stopped') return None # state_handler.state == 'starting' here if self.has_lock(): if not self.update_lock(): logger.info("Lost lock while starting up. Demoting self.") self.demote('immediate-nolock') return 'stopped PostgreSQL while starting up because leader key was lost' timeout = self._start_timeout or self.patroni.config['master_start_timeout'] time_left = timeout - self.state_handler.time_in_state() if time_left <= 0: if self.is_failover_possible(self.cluster.members): logger.info("Demoting self because master startup is taking too long") self.demote('immediate') return 'stopped PostgreSQL because of startup timeout' else: return 'master start has timed out, but continuing to wait because failover is not possible' else: msg = self.process_manual_failover_from_leader() if msg is not None: return msg return 'PostgreSQL is still starting up, {0:.0f} seconds until timeout'.format(time_left) else: # Use normal processing for standbys logger.info("Still starting up as a standby.") return None def set_start_timeout(self, value): """Sets timeout for starting as master before eligible for failover.
Must be called when async_executor is busy or in the main thread.""" self._start_timeout = value def _run_cycle(self): dcs_failed = False try: self.state_handler.reset_cluster_info_state() self.load_cluster_from_dcs() if self.is_paused(): self.watchdog.disable() self._was_paused = True else: if self._was_paused: self.state_handler.schedule_sanity_checks_after_pause() self._was_paused = False if not self.cluster.has_member(self.state_handler.name): self.touch_member() # cluster has a leader key but no initialize key if not (self.cluster.is_unlocked() or self.sysid_valid(self.cluster.initialize)) and self.has_lock(): self.dcs.initialize(create_new=(self.cluster.initialize is None), sysid=self.state_handler.sysid) if not (self.cluster.is_unlocked() or self.cluster.config and self.cluster.config.data) and self.has_lock(): self.dcs.set_config_value(json.dumps(self.patroni.config.dynamic_configuration, separators=(',', ':'))) self.cluster = self.dcs.get_cluster() if self._async_executor.busy: return self.handle_long_action_in_progress() msg = self.handle_starting_instance() if msg is not None: return msg # we've got here, so any async action has finished. if self.state_handler.bootstrapping: return self.post_bootstrap() if self.recovering and not self._rewind.is_needed: self.recovering = False # Check if we tried to recover and failed msg = self.post_recover() if msg is not None: return msg # is data directory empty? if self.state_handler.data_directory_empty(): self.state_handler.set_role('uninitialized') self.state_handler.stop('immediate') # In case datadir went away while we were master. self.watchdog.disable() # is this instance the leader? if self.has_lock(): self.release_leader_key_voluntarily() return 'released leader key voluntarily as data dir empty and currently leader' return self.bootstrap() # new node else: # check if we are allowed to join data_sysid = self.state_handler.sysid if not self.sysid_valid(data_sysid): # data directory is not empty, but no valid sysid, cluster must be broken, suggest reinit return ("data dir for the cluster is not empty, but system ID is invalid; consider doing reinitialize") if self.sysid_valid(self.cluster.initialize): if self.cluster.initialize != data_sysid: logger.fatal("system ID mismatch, node %s belongs to a different cluster: %s != %s", self.state_handler.name, self.cluster.initialize, data_sysid) sys.exit(1) elif self.cluster.is_unlocked() and not self.is_paused(): # "bootstrap", but data directory is not empty if not self.state_handler.cb_called and self.state_handler.is_running() \ and not self.state_handler.is_leader(): self._join_aborted = True logger.error('No initialize key in DCS and PostgreSQL is running as replica, aborting start') logger.error('Please first start Patroni on the node running as master') sys.exit(1) self.dcs.initialize(create_new=(self.cluster.initialize is None), sysid=data_sysid) if not self.state_handler.is_healthy(): if self.is_paused(): if self.has_lock(): self._delete_leader() return 'removed leader lock because postgres is not running' # Normally we don't start Postgres in a paused state. We make an exception for the demoted primary # that needs to be started after it had been stopped by demote. When there is no need to call rewind # the demote code follows through to starting Postgres right away, however, in the rewind case # it returns from demote and reaches this point to start PostgreSQL again after rewind. In that # case it makes no sense to continue to recover() unless rewind has finished successfully.
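# The `elif` just below chains `or` and `and`; because `and` binds tighter
# than `or` in Python, it evaluates as: failed, or (not yet executed and
# rewind is not both needed and permitted). A tiny standalone check of that
# grouping with hypothetical booleans:
def stays_stopped(failed, executed, needed, allowed):
    return failed or (not executed and not (needed and allowed))

# Rewind is needed and permitted but has not run yet: fall through to recover().
assert stays_stopped(False, False, True, True) is False
# Rewind already failed: report 'postgres is not running' instead.
assert stays_stopped(True, False, True, True) is True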
elif self._rewind.failed or not self._rewind.executed and not \ (self._rewind.is_needed and self._rewind.can_rewind_or_reinitialize_allowed): return 'postgres is not running' # try to start dead postgres return self.recover() try: if self.cluster.is_unlocked(): return self.process_unhealthy_cluster() else: msg = self.process_healthy_cluster() return self.evaluate_scheduled_restart() or msg finally: # we might not have a valid PostgreSQL connection here if another thread # stops PostgreSQL, therefore, we only reload replication slots if no # asynchronous processes are running (should be always the case for the master) if not self._async_executor.busy and not self.state_handler.is_starting(): self.state_handler.slots_handler.sync_replication_slots(self.cluster) if not self.state_handler.cb_called: if not self.state_handler.is_leader(): self._rewind.trigger_check_diverged_lsn() self.state_handler.call_nowait(ACTION_ON_START) except DCSError: dcs_failed = True logger.error('Error communicating with DCS') if not self.is_paused() and self.state_handler.is_running() and self.state_handler.is_leader(): self.demote('offline') return 'demoted self because DCS is not accessible and i was a leader' return 'DCS is not accessible' except (psycopg2.Error, PostgresConnectionException): return 'Error communicating with PostgreSQL. Will try again later' finally: if not dcs_failed: self.touch_member() def run_cycle(self): with self._async_executor: info = self._run_cycle() return (self.is_paused() and 'PAUSE: ' or '') + info def shutdown(self): if self.is_paused(): logger.info('Leader key is not deleted and Postgresql is not stopped due to paused state') self.watchdog.disable() elif not self._join_aborted: # FIXME: If stop doesn't reach safepoint quickly enough keepalive is triggered. If shutdown checkpoint # takes longer than ttl, then leader key is lost and replication might not have sent out all xlog. # This might not be the desired behavior of users, as a graceful shutdown of the host can mean lost data. # We probably need to do something smarter here. disable_wd = self.watchdog.disable if self.watchdog.is_running else None self.while_not_sync_standby(lambda: self.state_handler.stop(checkpoint=False, on_safepoint=disable_wd)) if not self.state_handler.is_running(): if self.has_lock(): self.dcs.delete_leader() self.touch_member() else: # XXX: what about when Patroni is started as the wrong user that has access to the watchdog device # but cannot shut down PostgreSQL. Root would be the obvious example. Would be nice to not kill the # system due to a bad config. logger.error("PostgreSQL shutdown failed, leader key not removed." + (" Leaving watchdog running." if self.watchdog.is_running else "")) def watch(self, timeout): # watch for leader key changes if postgres is running, the leader is known and the current node is not the lock owner if self._async_executor.busy or self.cluster.is_unlocked() or self.has_lock(False): leader_index = None else: leader_index = self.cluster.leader.index return self.dcs.watch(leader_index, timeout) def wakeup(self): """Calling this method will trigger the next run of the HA loop if there is no "active" leader watch request in progress. This usually happens on the master or if the node is running an async action""" self.dcs.event.set() def get_remote_member(self, member=None): """ In case of standby cluster this will tell us from which remote master to stream.
Config can be either patroni config or cluster.config.data """ cluster_params = self.get_standby_cluster_config() if cluster_params: name = member.name if member else 'remote_master:{}'.format(uuid.uuid1()) data = {k: v for k, v in cluster_params.items() if k in RemoteMember.allowed_keys()} data['no_replication_slot'] = 'primary_slot_name' not in cluster_params conn_kwargs = member.conn_kwargs() if member else \ {k: cluster_params[k] for k in ('host', 'port') if k in cluster_params} if conn_kwargs: data['conn_kwargs'] = conn_kwargs return RemoteMember(name, data) def get_remote_master(self): return self.get_remote_member() patroni-1.6.4/patroni/log.py000066400000000000000000000156651361356115100160220ustar00rootroot00000000000000import logging import os import sys from copy import deepcopy from logging.handlers import RotatingFileHandler from patroni.utils import deep_compare from six.moves.queue import Queue, Full from threading import Lock, Thread _LOGGER = logging.getLogger(__name__) def debug_exception(logger_obj, msg, *args, **kwargs): kwargs.pop("exc_info", False) if logger_obj.isEnabledFor(logging.DEBUG): logger_obj.debug(msg, *args, exc_info=True, **kwargs) else: msg = "{0}, DETAIL: '{1}'".format(msg, sys.exc_info()[1]) logger_obj.error(msg, *args, exc_info=False, **kwargs) def error_exception(logger_obj, msg, *args, **kwargs): exc_info = kwargs.pop("exc_info", True) logger_obj.error(msg, *args, exc_info=exc_info, **kwargs) class QueueHandler(logging.Handler): def __init__(self): logging.Handler.__init__(self) self.queue = Queue() self._records_lost = 0 def _put_record(self, record): self.format(record) record.msg = record.message record.args = None record.exc_info = None self.queue.put_nowait(record) def _try_to_report_lost_records(self): if self._records_lost: try: record = _LOGGER.makeRecord(_LOGGER.name, logging.WARNING, __file__, 0, 'QueueHandler has lost %s log records', (self._records_lost,), None, 'emit') self._put_record(record) self._records_lost = 0 except Exception: pass def emit(self, record): try: self._put_record(record) self._try_to_report_lost_records() except Exception: self._records_lost += 1 @property def records_lost(self): return self._records_lost class ProxyHandler(logging.Handler): def __init__(self, patroni_logger): logging.Handler.__init__(self) self.patroni_logger = patroni_logger def emit(self, record): self.patroni_logger.log_handler.handle(record) class PatroniLogger(Thread): DEFAULT_LEVEL = 'INFO' DEFAULT_TRACEBACK_LEVEL = 'ERROR' DEFAULT_FORMAT = '%(asctime)s %(levelname)s: %(message)s' NORMAL_LOG_QUEUE_SIZE = 2 # When everything is normal Patroni writes only 2 messages per HA loop DEFAULT_MAX_QUEUE_SIZE = 1000 LOGGING_BROKEN_EXIT_CODE = 5 def __init__(self): super(PatroniLogger, self).__init__() self._queue_handler = QueueHandler() self._root_logger = logging.getLogger() self._config = None self.log_handler = None self.log_handler_lock = Lock() self._old_handlers = [] self.reload_config({'level': 'DEBUG'}) # We will switch to the QueueHandler only when the thread was started. # This is necessary to protect from the cases when Patroni constructor # failed and PatroniLogger thread remains running and prevents shutdown.
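# A minimal standalone sketch of the two-phase handler setup described above:
# a pass-through handler keeps logging synchronous until the consumer thread
# is known to be alive, and only then is it swapped out. The class and names
# here are illustrative, not Patroni's.
import logging as sketch_logging

class PassThrough(sketch_logging.Handler):
    def __init__(self, target):
        sketch_logging.Handler.__init__(self)
        self._target = target

    def emit(self, record):
        self._target.handle(record)   # synchronous, safe before any thread runs

sketch_log = sketch_logging.getLogger('handler_swap_sketch')
proxy = PassThrough(sketch_logging.StreamHandler())
sketch_log.addHandler(proxy)          # phase 1: at construction time
sketch_log.error('handled synchronously')
sketch_log.removeHandler(proxy)       # phase 2: once the consumer thread is up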
self._proxy_handler = ProxyHandler(self) self._root_logger.addHandler(self._proxy_handler) def update_loggers(self): loggers = deepcopy(self._config.get('loggers') or {}) for name, logger in self._root_logger.manager.loggerDict.items(): if not isinstance(logger, logging.PlaceHolder): level = loggers.pop(name, logging.NOTSET) logger.setLevel(level) for name, level in loggers.items(): logger = self._root_logger.manager.getLogger(name) logger.setLevel(level) def reload_config(self, config): if self._config is None or not deep_compare(self._config, config): with self._queue_handler.queue.mutex: self._queue_handler.queue.maxsize = config.get('max_queue_size', self.DEFAULT_MAX_QUEUE_SIZE) self._root_logger.setLevel(config.get('level', PatroniLogger.DEFAULT_LEVEL)) if config.get('traceback_level', PatroniLogger.DEFAULT_TRACEBACK_LEVEL).lower() == 'debug': logging.Logger.exception = debug_exception else: logging.Logger.exception = error_exception new_handler = None if 'dir' in config: if not isinstance(self.log_handler, RotatingFileHandler): new_handler = RotatingFileHandler(os.path.join(config['dir'], __name__)) handler = new_handler or self.log_handler handler.maxBytes = int(config.get('file_size', 25000000)) handler.backupCount = int(config.get('file_num', 4)) else: if self.log_handler is None or isinstance(self.log_handler, RotatingFileHandler): new_handler = logging.StreamHandler() handler = new_handler or self.log_handler oldlogformat = (self._config or {}).get('format', PatroniLogger.DEFAULT_FORMAT) logformat = config.get('format', PatroniLogger.DEFAULT_FORMAT) olddateformat = (self._config or {}).get('dateformat') or None dateformat = config.get('dateformat') or None # Convert empty string to `None` if oldlogformat != logformat or olddateformat != dateformat or new_handler: handler.setFormatter(logging.Formatter(logformat, dateformat)) if new_handler: with self.log_handler_lock: if self.log_handler: self._old_handlers.append(self.log_handler) self.log_handler = new_handler self._config = config.copy() self.update_loggers() def _close_old_handlers(self): while True: with self.log_handler_lock: if not self._old_handlers: break handler = self._old_handlers.pop() try: handler.close() except Exception: _LOGGER.exception('Failed to close the old log handler %s', handler) def run(self): # switch to QueueHandler only when the thread was started with self.log_handler_lock: self._root_logger.addHandler(self._queue_handler) self._root_logger.removeHandler(self._proxy_handler) while True: self._close_old_handlers() record = self._queue_handler.queue.get(True) if record is None: break self.log_handler.handle(record) self._queue_handler.queue.task_done() def shutdown(self): try: self._queue_handler.queue.put_nowait(None) except Full: # Queue is full. # It seems that logging is not working, exiting with non-standard exit-code is the best we can do. 
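# The shutdown path below depends on two queue behaviours: put_nowait() raises
# Full once maxsize is reached, and a None sentinel wakes the consumer thread
# for a clean exit. A tiny standalone illustration of the failure mode that
# leads to LOGGING_BROKEN_EXIT_CODE:
from six.moves.queue import Queue as SketchQueue, Full as SketchFull

q = SketchQueue(maxsize=1)
q.put_nowait('record')
try:
    q.put_nowait(None)    # queue is full: the sentinel cannot be delivered
except SketchFull:
    print('queue full, would exit with a non-standard exit code')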
sys.exit(self.LOGGING_BROKEN_EXIT_CODE) self.join() logging.shutdown() @property def queue_size(self): return self._queue_handler.queue.qsize() @property def records_lost(self): return self._queue_handler.records_lost patroni-1.6.4/patroni/postgresql/000077500000000000000000000000001361356115100170575ustar00rootroot00000000000000patroni-1.6.4/patroni/postgresql/__init__.py000066400000000000000000001076211361356115100211770ustar00rootroot00000000000000import logging import os import psycopg2 import shlex import shutil import subprocess import time from contextlib import contextmanager from copy import deepcopy from patroni.postgresql.callback_executor import CallbackExecutor from patroni.postgresql.bootstrap import Bootstrap from patroni.postgresql.cancellable import CancellableSubprocess from patroni.postgresql.config import ConfigHandler from patroni.postgresql.connection import Connection, get_connection_cursor from patroni.postgresql.misc import parse_history, postgres_major_version_to_int from patroni.postgresql.postmaster import PostmasterProcess from patroni.postgresql.slots import SlotsHandler from patroni.exceptions import PostgresConnectionException from patroni.utils import Retry, RetryFailedError, polling_loop from threading import current_thread, Lock logger = logging.getLogger(__name__) ACTION_ON_START = "on_start" ACTION_ON_STOP = "on_stop" ACTION_ON_RESTART = "on_restart" ACTION_ON_RELOAD = "on_reload" ACTION_ON_ROLE_CHANGE = "on_role_change" ACTION_NOOP = "noop" STATE_RUNNING = 'running' STATE_REJECT = 'rejecting connections' STATE_NO_RESPONSE = 'not responding' STATE_UNKNOWN = 'unknown' STOP_POLLING_INTERVAL = 1 @contextmanager def null_context(): yield class Postgresql(object): def __init__(self, config): self.name = config['name'] self.scope = config['scope'] self._data_dir = config['data_dir'] self._database = config.get('database', 'postgres') self._version_file = os.path.join(self._data_dir, 'PG_VERSION') self._pg_control = os.path.join(self._data_dir, 'global', 'pg_control') self._major_version = self.get_major_version() self._state_lock = Lock() self.set_state('stopped') self._pending_restart = False self._connection = Connection() self.config = ConfigHandler(self, config) self.config.check_directories() self._bin_dir = config.get('bin_dir') or '' self.bootstrap = Bootstrap(self) self.bootstrapping = False self.__thread_ident = current_thread().ident self.slots_handler = SlotsHandler(self) self._callback_executor = CallbackExecutor() self.__cb_called = False self.__cb_pending = None self.cancellable = CancellableSubprocess() self._sysid = None self.retry = Retry(max_tries=-1, deadline=config['retry_timeout']/2.0, max_delay=1, retry_exceptions=PostgresConnectionException) # Retry 'pg_is_in_recovery()' only once self._is_leader_retry = Retry(max_tries=1, deadline=config['retry_timeout']/2.0, max_delay=1, retry_exceptions=PostgresConnectionException) self._role_lock = Lock() self.set_role(self.get_postgres_role_from_data_directory()) self._state_entry_timestamp = None self._cluster_info_state = {} self._cached_replica_timeline = None # Last known running process self._postmaster_proc = None if self.is_running(): self.set_state('running') self.set_role('master' if self.is_leader() else 'replica') self.config.write_postgresql_conf() # we are "joining" already running postgres hba_saved = self.config.replace_pg_hba() ident_saved = self.config.replace_pg_ident() if hba_saved or ident_saved: self.reload() elif self.role == 'master': self.set_role('demoted') @property def 
create_replica_methods(self): return self.config.get('create_replica_methods', []) or self.config.get('create_replica_method', []) @property def major_version(self): return self._major_version @property def database(self): return self._database @property def data_dir(self): return self._data_dir @property def callback(self): return self.config.get('callbacks') or {} @property def wal_name(self): return 'wal' if self._major_version >= 100000 else 'xlog' @property def lsn_name(self): return 'lsn' if self._major_version >= 100000 else 'location' @property def cluster_info_query(self): pg_control_timeline = 'timeline_id FROM pg_catalog.pg_control_checkpoint()' \ if self._major_version >= 90600 and self.role == 'standby_leader' else '0' return ("SELECT CASE WHEN pg_catalog.pg_is_in_recovery() THEN 0 " "ELSE ('x' || pg_catalog.substr(pg_catalog.pg_{0}file_name(" "pg_catalog.pg_current_{0}_{1}()), 1, 8))::bit(32)::int END, " "CASE WHEN pg_catalog.pg_is_in_recovery() THEN GREATEST(" " pg_catalog.pg_{0}_{1}_diff(COALESCE(" "pg_catalog.pg_last_{0}_receive_{1}(), '0/0'), '0/0')::bigint," " pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_last_{0}_replay_{1}(), '0/0')::bigint)" "ELSE pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_current_{0}_{1}(), '0/0')::bigint " "END, {2}").format(self.wal_name, self.lsn_name, pg_control_timeline) def _version_file_exists(self): return not self.data_directory_empty() and os.path.isfile(self._version_file) def get_major_version(self): if self._version_file_exists(): try: with open(self._version_file) as f: return postgres_major_version_to_int(f.read().strip()) except Exception: logger.exception('Failed to read PG_VERSION from %s', self._data_dir) return 0 def pgcommand(self, cmd): """Returns path to the specified PostgreSQL command""" return os.path.join(self._bin_dir, cmd) def pg_ctl(self, cmd, *args, **kwargs): """Builds and executes pg_ctl command :returns: `!True` when return_code == 0, otherwise `!False`""" pg_ctl = [self.pgcommand('pg_ctl'), cmd] return subprocess.call(pg_ctl + ['-D', self._data_dir] + list(args), **kwargs) == 0 def pg_isready(self): """Runs pg_isready to see if PostgreSQL is accepting connections. 
:returns: 'running' if PostgreSQL is up, 'rejecting connections' if it is starting up, 'not responding' if it is not up, or 'unknown' when the status can not be determined.""" r = self.config.local_connect_kwargs cmd = [self.pgcommand('pg_isready'), '-p', r['port'], '-d', self._database] # Host is not set if we are connecting via default unix socket if 'host' in r: cmd.extend(['-h', r['host']]) # We only need the username because pg_isready does not try to authenticate if 'user' in r: cmd.extend(['-U', r['user']]) ret = subprocess.call(cmd) return_codes = {0: STATE_RUNNING, 1: STATE_REJECT, 2: STATE_NO_RESPONSE, 3: STATE_UNKNOWN} return return_codes.get(ret, STATE_UNKNOWN) def reload_config(self, config, sighup=False): self.config.reload_config(config, sighup) self._is_leader_retry.deadline = self.retry.deadline = config['retry_timeout']/2.0 @property def pending_restart(self): return self._pending_restart def set_pending_restart(self, value): self._pending_restart = value @property def sysid(self): if not self._sysid and not self.bootstrapping: data = self.controldata() self._sysid = data.get('Database system identifier', "") return self._sysid def get_postgres_role_from_data_directory(self): if self.data_directory_empty() or not self.controldata(): return 'uninitialized' elif self.config.recovery_conf_exists(): return 'replica' else: return 'master' @property def server_version(self): return self._connection.server_version def connection(self): return self._connection.get() def set_connection_kwargs(self, kwargs): self._connection.set_conn_kwargs(kwargs) def _query(self, sql, *params): """We are always using the same cursor, therefore this method is not thread-safe!!! You can call it from different threads only if you are holding explicit `AsyncExecutor` lock, because the main thread is always holding this lock when running HA cycle.""" cursor = None try: cursor = self._connection.cursor() cursor.execute(sql, params) return cursor except psycopg2.Error as e: if cursor and cursor.connection.closed == 0: # When connected via unix socket, psycopg2 can't recognize 'connection lost' # and leaves `_cursor_holder.connection.closed == 0`, but psycopg2.OperationalError # is still raised (which is correct). It doesn't make sense to continue with the existing # connection and we will close it, to avoid its reuse by the `cursor` method. if isinstance(e, psycopg2.OperationalError): self._connection.close() else: raise e if self.state == 'restarting': raise RetryFailedError('cluster is being restarted') raise PostgresConnectionException('connection problems') def query(self, sql, *args, **kwargs): if not kwargs.get('retry', True): return self._query(sql, *args) try: return self.retry(self._query, sql, *args) except RetryFailedError as e: raise PostgresConnectionException(str(e)) def pg_control_exists(self): return os.path.isfile(self._pg_control) def data_directory_empty(self): if self.pg_control_exists(): return False if not os.path.exists(self._data_dir): return True return all(os.name != 'nt' and (n.startswith('.') or n == 'lost+found') for n in os.listdir(self._data_dir)) def replica_method_options(self, method): return deepcopy(self.config.get(method, {})) def replica_method_can_work_without_replication_connection(self, method): return method != 'basebackup' and self.replica_method_options(method).get('no_master') def can_create_replica_without_replication_connection(self, replica_methods=None): """ go through the replication methods to see if there are ones that do not require a working replication connection.
""" if replica_methods is None: replica_methods = self.create_replica_methods return any(self.replica_method_can_work_without_replication_connection(m) for m in replica_methods) def reset_cluster_info_state(self): self._cluster_info_state = {} def _cluster_info_state_get(self, name): if not self._cluster_info_state: try: result = self._is_leader_retry(self._query, self.cluster_info_query).fetchone() self._cluster_info_state = dict(zip(['timeline', 'wal_position', 'pg_control_timeline'], result)) except RetryFailedError as e: # SELECT failed two times self._cluster_info_state = {'error': str(e)} if not self.is_starting() and self.pg_isready() == STATE_REJECT: self.set_state('starting') if 'error' in self._cluster_info_state: raise PostgresConnectionException(self._cluster_info_state['error']) return self._cluster_info_state.get(name) def is_leader(self): return bool(self._cluster_info_state_get('timeline')) def pg_control_timeline(self): try: return int(self.controldata().get("Latest checkpoint's TimeLineID")) except (TypeError, ValueError): logger.exception('Failed to parse timeline from pg_controldata output') def is_running(self): """Returns PostmasterProcess if one is running on the data directory or None. If most recently seen process is running updates the cached process based on pid file.""" if self._postmaster_proc: if self._postmaster_proc.is_running(): return self._postmaster_proc self._postmaster_proc = None # we noticed that postgres was restarted, force syncing of replication self.slots_handler.schedule() self._postmaster_proc = PostmasterProcess.from_pidfile(self._data_dir) return self._postmaster_proc @property def cb_called(self): return self.__cb_called def call_nowait(self, cb_name): """ pick a callback command and call it without waiting for it to finish """ if self.bootstrapping: return if cb_name in (ACTION_ON_START, ACTION_ON_STOP, ACTION_ON_RESTART, ACTION_ON_ROLE_CHANGE): self.__cb_called = True if self.callback and cb_name in self.callback: cmd = self.callback[cb_name] try: cmd = shlex.split(self.callback[cb_name]) + [cb_name, self.role, self.scope] self._callback_executor.call(cmd) except Exception: logger.exception('callback %s %s %s %s failed', cmd, cb_name, self.role, self.scope) @property def role(self): with self._role_lock: return self._role def set_role(self, value): with self._role_lock: self._role = value @property def state(self): with self._state_lock: return self._state def set_state(self, value): with self._state_lock: self._state = value self._state_entry_timestamp = time.time() def time_in_state(self): return time.time() - self._state_entry_timestamp def is_starting(self): return self.state == 'starting' def wait_for_port_open(self, postmaster, timeout): """Waits until PostgreSQL opens ports.""" for _ in polling_loop(timeout): if self.cancellable.is_cancelled: return False if not postmaster.is_running(): logger.error('postmaster is not running') self.set_state('start failed') return False isready = self.pg_isready() if isready != STATE_NO_RESPONSE: if isready not in [STATE_REJECT, STATE_RUNNING]: logger.warning("Can't determine PostgreSQL startup status, assuming running") return True logger.warning("Timed out waiting for PostgreSQL to start") return False def start(self, timeout=None, task=None, block_callbacks=False, role=None): """Start PostgreSQL Waits for postmaster to open ports or terminate so pg_isready can be used to check startup completion or failure. 
:returns: True if start was initiated and postmaster ports are open, False if start failed""" # make sure we close all connections established against # the former node, otherwise, we might get a stale one # after kill -9, which would report incorrect data to # patroni. self._connection.close() if self.is_running(): logger.error('Cannot start PostgreSQL because one is already running.') self.set_state('starting') return True if not block_callbacks: self.__cb_pending = ACTION_ON_START self.set_role(role or self.get_postgres_role_from_data_directory()) self.set_state('starting') self._pending_restart = False configuration = self.config.effective_configuration self.config.check_directories() self.config.write_postgresql_conf(configuration) self.config.resolve_connection_addresses() self.config.replace_pg_hba() self.config.replace_pg_ident() options = ['--{0}={1}'.format(p, configuration[p]) for p in self.config.CMDLINE_OPTIONS if p in configuration and p != 'wal_keep_segments'] if self.cancellable.is_cancelled: return False with task or null_context(): if task and task.is_cancelled: logger.info("PostgreSQL start cancelled.") return False self._postmaster_proc = PostmasterProcess.start(self.pgcommand('postgres'), self._data_dir, self.config.postgresql_conf, options) if task: task.complete(self._postmaster_proc) start_timeout = timeout if not start_timeout: try: start_timeout = float(self.config.get('pg_ctl_timeout', 60)) except ValueError: start_timeout = 60 # We want postmaster to open ports before we continue if not self._postmaster_proc or not self.wait_for_port_open(self._postmaster_proc, start_timeout): return False ret = self.wait_for_startup(start_timeout) if ret is not None: return ret elif timeout is not None: return False else: return None def checkpoint(self, connect_kwargs=None): check_not_is_in_recovery = connect_kwargs is not None connect_kwargs = connect_kwargs or self.config.local_connect_kwargs for p in ['connect_timeout', 'options']: connect_kwargs.pop(p, None) try: with get_connection_cursor(**connect_kwargs) as cur: cur.execute("SET statement_timeout = 0") if check_not_is_in_recovery: cur.execute('SELECT pg_catalog.pg_is_in_recovery()') if cur.fetchone()[0]: return 'is_in_recovery=true' return cur.execute('CHECKPOINT') except psycopg2.Error: logger.exception('Exception during CHECKPOINT') return 'not accessible or not healthy' def stop(self, mode='fast', block_callbacks=False, checkpoint=None, on_safepoint=None): """Stop PostgreSQL Supports a callback when a safepoint is reached. A safepoint is when no user backend can return a successful commit to users. Currently this means we wait for user backends to close. But in the future alternate mechanisms could be added. :param on_safepoint: This callback is called when no user backends are running.
""" if checkpoint is None: checkpoint = False if mode == 'immediate' else True success, pg_signaled = self._do_stop(mode, block_callbacks, checkpoint, on_safepoint) if success: # block_callbacks is used during restart to avoid # running start/stop callbacks in addition to restart ones if not block_callbacks: self.set_state('stopped') if pg_signaled: self.call_nowait(ACTION_ON_STOP) else: logger.warning('pg_ctl stop failed') self.set_state('stop failed') return success def _do_stop(self, mode, block_callbacks, checkpoint, on_safepoint): postmaster = self.is_running() if not postmaster: if on_safepoint: on_safepoint() return True, False if checkpoint and not self.is_starting(): self.checkpoint() if not block_callbacks: self.set_state('stopping') # Send signal to postmaster to stop success = postmaster.signal_stop(mode, self.pgcommand('pg_ctl')) if success is not None: if success and on_safepoint: on_safepoint() return success, True # We can skip safepoint detection if we don't have a callback if on_safepoint: # Wait for our connection to terminate so we can be sure that no new connections are being initiated self._wait_for_connection_close(postmaster) postmaster.wait_for_user_backends_to_close() on_safepoint() postmaster.wait() return True, True def terminate_starting_postmaster(self, postmaster): """Terminates a postmaster that has not yet opened ports or possibly even written a pid file. Blocks until the process goes away.""" postmaster.signal_stop('immediate', self.pgcommand('pg_ctl')) postmaster.wait() def _wait_for_connection_close(self, postmaster): try: with self.connection().cursor() as cur: while postmaster.is_running(): # Need a timeout here? cur.execute("SELECT 1") time.sleep(STOP_POLLING_INTERVAL) except psycopg2.Error: pass def reload(self): ret = self.pg_ctl('reload') if ret: self.call_nowait(ACTION_ON_RELOAD) return ret def check_for_startup(self): """Checks PostgreSQL status and returns if PostgreSQL is in the middle of startup.""" return self.is_starting() and not self.check_startup_state_changed() def check_startup_state_changed(self): """Checks if PostgreSQL has completed starting up or failed or still starting. Should only be called when state == 'starting' :returns: True if state was changed from 'starting' """ ready = self.pg_isready() if ready == STATE_REJECT: return False elif ready == STATE_NO_RESPONSE: ret = not self.is_running() if ret: self.set_state('start failed') self.slots_handler.schedule(False) # TODO: can remove this? self.config.save_configuration_files(True) # TODO: maybe remove this? return ret else: if ready != STATE_RUNNING: # Bad configuration or unexpected OS error. No idea of PostgreSQL status. # Let the main loop of run cycle clean up the mess. logger.warning("%s status returned from pg_isready", "Unknown" if ready == STATE_UNKNOWN else "Invalid") self.set_state('running') self.slots_handler.schedule() self.config.save_configuration_files(True) # TODO: __cb_pending can be None here after PostgreSQL restarts on its own. Do we want to call the callback? # Previously we didn't even notice. action = self.__cb_pending or ACTION_ON_START self.call_nowait(action) self.__cb_pending = None return True def wait_for_startup(self, timeout=None): """Waits for PostgreSQL startup to complete or fail. 
:returns: True if start was successful, False otherwise""" if not self.is_starting(): # Should not happen logger.warning("wait_for_startup() called when not in starting state") while not self.check_startup_state_changed(): if self.cancellable.is_cancelled or timeout and self.time_in_state() > timeout: return None time.sleep(1) return self.state == 'running' def restart(self, timeout=None, task=None, block_callbacks=False, role=None): """Restarts PostgreSQL. When the timeout parameter is set the call will block either until PostgreSQL has started, failed to start or timeout arrives. :returns: True when restart was successful and timeout did not expire when waiting. """ self.set_state('restarting') if not block_callbacks: self.__cb_pending = ACTION_ON_RESTART ret = self.stop(block_callbacks=True) and self.start(timeout, task, True, role) if not ret and not self.is_starting(): self.set_state('restart failed ({0})'.format(self.state)) return ret def is_healthy(self): if not self.is_running(): logger.warning('Postgresql is not running.') return False return True def controldata(self): """ return the contents of pg_controldata, or a non-True value if the pg_controldata call failed """ result = {} # Don't try to call pg_controldata during backup restore if self._version_file_exists() and self.state != 'creating replica': try: env = os.environ.copy() env.update(LANG='C', LC_ALL='C') data = subprocess.check_output([self.pgcommand('pg_controldata'), self._data_dir], env=env) if data: data = data.decode('utf-8').splitlines() # pg_controldata output depends on major version. Some of the parameters are prefixed with 'Current ' result = {l.split(':')[0].replace('Current ', '', 1): l.split(':', 1)[1].strip() for l in data if l and ':' in l} except subprocess.CalledProcessError: logger.exception("Error when calling pg_controldata") return result @contextmanager def get_replication_connection_cursor(self, host='localhost', port=5432, database=None, **kwargs): conn_kwargs = self.config.replication.copy() conn_kwargs.update(host=host, port=int(port), database=database or self._database, connect_timeout=3, user=conn_kwargs.pop('username'), replication=1, options='-c statement_timeout=2000') with get_connection_cursor(**conn_kwargs) as cur: yield cur def get_local_timeline_lsn_from_replication_connection(self): timeline = lsn = None try: with self.get_replication_connection_cursor(**self.config.local_replication_address) as cur: cur.execute('IDENTIFY_SYSTEM') timeline, lsn = cur.fetchone()[1:3] except Exception: logger.exception('Can not fetch local timeline and lsn from replication connection') return timeline, lsn def get_replica_timeline(self): return self.get_local_timeline_lsn_from_replication_connection()[0] def replica_cached_timeline(self, master_timeline): if not self._cached_replica_timeline or not master_timeline or self._cached_replica_timeline != master_timeline: self._cached_replica_timeline = self.get_replica_timeline() return self._cached_replica_timeline def get_master_timeline(self): return self._cluster_info_state_get('timeline') def get_history(self, timeline): history_path = 'pg_{0}/{1:08X}.history'.format(self.wal_name, timeline) try: cursor = self._connection.cursor() cursor.execute('SELECT isdir, modification FROM pg_catalog.pg_stat_file(%s)', (history_path,)) isdir, modification = cursor.fetchone() if not isdir: cursor.execute('SELECT pg_catalog.pg_read_file(%s)', (history_path,)) history = list(parse_history(cursor.fetchone()[0])) if history[-1][0] == timeline - 1:
history[-1].append(modification.isoformat()) return history except Exception: logger.exception('Failed to read and parse %s', (history_path,)) def follow(self, member, role='replica', timeout=None, do_reload=False): recovery_params = self.config.build_recovery_params(member) self.config.write_recovery_conf(recovery_params) # When we are demoting the master or standby_leader to replica or promoting a replica to a standby_leader # and we know for sure that postgres was already running before, we will only execute on_role_change # callback and prevent execution of on_restart/on_start callback. # If the role remains the same (replica or standby_leader), we will execute on_start or on_restart change_role = self.cb_called and (self.role in ('master', 'demoted') or not {'standby_leader', 'replica'} - {self.role, role}) if change_role: self.__cb_pending = ACTION_NOOP if self.is_running(): if do_reload: self.config.write_postgresql_conf() self.reload() else: self.restart(block_callbacks=change_role, role=role) else: self.start(timeout=timeout, block_callbacks=change_role, role=role) if change_role: # TODO: postpone this until start completes, or maybe do even earlier self.call_nowait(ACTION_ON_ROLE_CHANGE) return True def _wait_promote(self, wait_seconds): for _ in polling_loop(wait_seconds): data = self.controldata() if data.get('Database cluster state') == 'in production': return True def promote(self, wait_seconds, on_success=None, access_is_restricted=False): if self.role == 'master': return True ret = self.pg_ctl('promote', '-W') if ret: self.set_role('master') if on_success is not None: on_success() if not access_is_restricted: self.call_nowait(ACTION_ON_ROLE_CHANGE) ret = self._wait_promote(wait_seconds) return ret def timeline_wal_position(self): # This method could be called from different threads (simultaneously with some other `_query` calls). # If it is not called from the main thread we will create a new cursor to execute the statement.
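# A standalone sketch of the thread-affinity check used just below: remember
# the ident of the thread that owns the shared cursor and take a private one
# everywhere else. Purely illustrative names.
from threading import current_thread as sketch_current_thread

class AffinityExample(object):
    def __init__(self):
        self._owner_ident = sketch_current_thread().ident

    def query(self, run_shared, run_private):
        if sketch_current_thread().ident == self._owner_ident:
            return run_shared()     # owning thread: reuse the cached cursor
        return run_private()        # any other thread: open its own cursor

ex = AffinityExample()
assert ex.query(lambda: 'shared', lambda: 'private') == 'shared'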
if current_thread().ident == self.__thread_ident: return (self._cluster_info_state_get('timeline'), self._cluster_info_state_get('wal_position'), self._cluster_info_state_get('pg_control_timeline')) with self.connection().cursor() as cursor: cursor.execute(self.cluster_info_query) return cursor.fetchone()[:3] def postmaster_start_time(self): try: query = "SELECT pg_catalog.to_char(pg_catalog.pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS.MS TZ')" if current_thread().ident == self.__thread_ident: return self.query(query).fetchone()[0] with self.connection().cursor() as cursor: cursor.execute(query) return cursor.fetchone()[0] except psycopg2.Error: return None def last_operation(self): return str(self._cluster_info_state_get('wal_position')) def configure_server_parameters(self): self._major_version = self.get_major_version() self.config.setup_server_parameters() return True def move_data_directory(self): if os.path.isdir(self._data_dir) and not self.is_running(): try: new_name = '{0}_{1}'.format(self._data_dir, time.strftime('%Y-%m-%d-%H-%M-%S')) logger.info('renaming data directory to %s', new_name) os.rename(self._data_dir, new_name) except OSError: logger.exception("Could not rename data directory %s", self._data_dir) def remove_data_directory(self): self.set_role('uninitialized') logger.info('Removing data directory: %s', self._data_dir) try: if os.path.islink(self._data_dir): os.unlink(self._data_dir) elif not os.path.exists(self._data_dir): return elif os.path.isfile(self._data_dir): os.remove(self._data_dir) elif os.path.isdir(self._data_dir): # let's see if pg_xlog|pg_wal is a symlink, in this case we # should clean the target for pg_wal_dir in ('pg_xlog', 'pg_wal'): pg_wal_path = os.path.join(self._data_dir, pg_wal_dir) if os.path.exists(pg_wal_path) and os.path.islink(pg_wal_path): pg_wal_realpath = os.path.realpath(pg_wal_path) logger.info('Removing WAL directory: %s', pg_wal_realpath) shutil.rmtree(pg_wal_realpath) shutil.rmtree(self._data_dir) except (IOError, OSError): logger.exception('Could not remove data directory %s', self._data_dir) self.move_data_directory() def pick_synchronous_standby(self, cluster): """Finds the best candidate to be the synchronous standby. Current synchronous standby is always preferred, unless it has disconnected or does not want to be a synchronous standby any longer. :returns tuple of candidate name or None, and bool showing if the member is the active synchronous standby. """ current = cluster.sync.sync_standby current = current.lower() if current else current members = {m.name.lower(): m for m in cluster.members} candidates = [] # Pick candidates based on who has flushed WAL the farthest. # TODO: for synchronous_commit = remote_write we actually want to order on write_location for app_name, state, sync_state in self.query( "SELECT pg_catalog.lower(application_name), state, sync_state" " FROM pg_catalog.pg_stat_replication" " ORDER BY flush_{0} DESC".format(self.lsn_name)): member = members.get(app_name) if state != 'streaming' or not member or member.tags.get('nosync', False): continue if sync_state == 'sync': return member.name, True if sync_state == 'potential' and app_name == current: # Prefer current even if not the best one any more to avoid indecisiveness and spurious swaps. return cluster.sync.sync_standby, False
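# A condensed standalone model of the selection loop above: rows arrive
# ordered by flushed WAL, a streaming 'sync' row wins outright, the current
# standby is kept when it is merely 'potential', otherwise the best-flushed
# candidate is promoted. The row data below is made up for illustration.
def pick_sketch(rows, current):
    candidates = []
    for app_name, state, sync_state in rows:
        if state != 'streaming':
            continue
        if sync_state == 'sync':
            return app_name, True
        if sync_state == 'potential' and app_name == current:
            return app_name, False
        if sync_state in ('async', 'potential'):
            candidates.append(app_name)
    return (candidates[0], False) if candidates else (None, False)

rows = [('a', 'streaming', 'async'), ('b', 'streaming', 'potential')]
assert pick_sketch(rows, current='b') == ('b', False)   # keep current standby
assert pick_sketch(rows, current=None) == ('a', False)  # best-flushed wins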
if sync_state in ('async', 'potential'): candidates.append(member.name) if candidates: return candidates[0], False return None, False def read_postmaster_opts(self): """returns the list of option names/values from postmaster.opts, Empty dict if read failed or no file""" result = {} try: with open(os.path.join(self._data_dir, 'postmaster.opts')) as f: data = f.read() for opt in data.split('" "'): if '=' in opt and opt.startswith('--'): name, val = opt.split('=', 1) result[name.strip('-')] = val.rstrip('"\n') except IOError: logger.exception('Error when reading postmaster.opts') return result def single_user_mode(self, command=None, options=None): """run a given command in a single-user mode. If the command is empty, then just start and stop""" cmd = [self.pgcommand('postgres'), '--single', '-D', self._data_dir] for opt, val in sorted((options or {}).items()): cmd.extend(['-c', '{0}={1}'.format(opt, val)]) # need a database name to connect cmd.append(self._database) return self.cancellable.call(cmd, communicate_input=command) def cleanup_archive_status(self): status_dir = os.path.join(self._data_dir, 'pg_' + self.wal_name, 'archive_status') try: for f in os.listdir(status_dir): path = os.path.join(status_dir, f) try: if os.path.islink(path): os.unlink(path) elif os.path.isfile(path): os.remove(path) except OSError: logger.exception('Unable to remove %s', path) except OSError: logger.exception('Unable to list %s', status_dir) def fix_cluster_state(self): self.cleanup_archive_status() # Start in a single user mode and stop to produce a clean shutdown opts = self.read_postmaster_opts() opts.update({'archive_mode': 'on', 'archive_command': 'false'}) self.config.remove_recovery_conf() return self.single_user_mode(options=opts) == 0 or None def schedule_sanity_checks_after_pause(self): """ After coming out of pause we have to: 1. sync replication slots, because it might happen that slots were removed 2.
get new 'Database system identifier' to make sure that it wasn't changed """ self.slots_handler.schedule() self._sysid = None patroni-1.6.4/patroni/postgresql/bootstrap.py000066400000000000000000000423261361356115100214540ustar00rootroot00000000000000import logging import os import shlex import tempfile import time from patroni.dcs import RemoteMember from patroni.utils import deep_compare from six import string_types logger = logging.getLogger(__name__) class Bootstrap(object): def __init__(self, postgresql): self._postgresql = postgresql self._running_custom_bootstrap = False @property def running_custom_bootstrap(self): return self._running_custom_bootstrap @property def keep_existing_recovery_conf(self): return self._running_custom_bootstrap and self._keep_existing_recovery_conf @staticmethod def process_user_options(tool, options, not_allowed_options, error_handler): user_options = [] def option_is_allowed(name): ret = name not in not_allowed_options if not ret: error_handler('{0} option for {1} is not allowed'.format(name, tool)) return ret if isinstance(options, dict): for k, v in options.items(): if k and v: user_options.append('--{0}={1}'.format(k, v)) elif isinstance(options, list): for opt in options: if isinstance(opt, string_types) and option_is_allowed(opt): user_options.append('--{0}'.format(opt)) elif isinstance(opt, dict): keys = list(opt.keys()) if len(keys) != 1 or not isinstance(opt[keys[0]], string_types) or not option_is_allowed(keys[0]): error_handler('Error when parsing {0} key-value option {1}: only one key-value is allowed' ' and value should be a string'.format(tool, opt[keys[0]])) user_options.append('--{0}={1}'.format(keys[0], opt[keys[0]])) else: error_handler('Error when parsing {0} option {1}: value should be a string value' ' or a single key-value pair'.format(tool, opt)) else: error_handler('{0} options must be a list or dict'.format(tool)) return user_options def _initdb(self, config): self._postgresql.set_state('initializing new cluster') not_allowed_options = ('pgdata', 'nosync', 'pwfile', 'sync-only', 'version') def error_handler(e): raise Exception(e) options = self.process_user_options('initdb', config or [], not_allowed_options, error_handler) pwfile = None if self._postgresql.config.superuser: if 'username' in self._postgresql.config.superuser: options.append('--username={0}'.format(self._postgresql.config.superuser['username'])) if 'password' in self._postgresql.config.superuser: (fd, pwfile) = tempfile.mkstemp() os.write(fd, self._postgresql.config.superuser['password'].encode('utf-8')) os.close(fd) options.append('--pwfile={0}'.format(pwfile)) options = ['-o', ' '.join(options)] if options else [] ret = self._postgresql.pg_ctl('initdb', *options) if pwfile: os.remove(pwfile) if ret: self._postgresql.configure_server_parameters() else: self._postgresql.set_state('initdb failed') return ret def _post_restore(self): self._postgresql.config.restore_configuration_files() self._postgresql.configure_server_parameters() # make sure there is no trigger file or postgres will be automatically promoted trigger_file = 'promote_trigger_file' if self._postgresql.major_version >= 120000 else 'trigger_file' trigger_file = self._postgresql.config.get('recovery_conf', {}).get(trigger_file) or 'promote' trigger_file = os.path.abspath(os.path.join(self._postgresql.data_dir, trigger_file)) if os.path.exists(trigger_file): os.unlink(trigger_file) def _custom_bootstrap(self, config): self._postgresql.set_state('running custom bootstrap script') params = ['--scope=' +
self._postgresql.scope, '--datadir=' + self._postgresql.data_dir] try: logger.info('Running custom bootstrap script: %s', config['command']) if self._postgresql.cancellable.call(shlex.split(config['command']) + params) != 0: self._postgresql.set_state('custom bootstrap failed') return False except Exception: logger.exception('Exception during custom bootstrap') return False self._post_restore() if 'recovery_conf' in config: self._postgresql.config.write_recovery_conf(config['recovery_conf']) elif not self.keep_existing_recovery_conf: self._postgresql.config.remove_recovery_conf() return True def call_post_bootstrap(self, config): """ runs a script after initdb or custom bootstrap script is called and waits until completion. """ cmd = config.get('post_bootstrap') or config.get('post_init') if cmd: r = self._postgresql.config.local_connect_kwargs connstring = self._postgresql.config.format_dsn(r, True) if 'host' not in r: # https://www.postgresql.org/docs/current/static/libpq-pgpass.html # A host name of localhost matches both TCP (host name localhost) and Unix domain socket # (pghost empty or the default socket directory) connections coming from the local machine. r['host'] = 'localhost' # set it to localhost to write into pgpass env = self._postgresql.config.write_pgpass(r) if 'password' in r else None try: ret = self._postgresql.cancellable.call(shlex.split(cmd) + [connstring], env=env) except OSError: logger.error('post_init script %s failed', cmd) return False if ret != 0: logger.error('post_init script %s returned non-zero code %d', cmd, ret) return False return True def create_replica(self, clone_member): """ create the replica according to the replica_method defined by the user. this is a list, so we need to loop through all methods the user supplies """ self._postgresql.set_state('creating replica') self._postgresql.schedule_sanity_checks_after_pause() is_remote_master = isinstance(clone_member, RemoteMember) # get list of replica methods either from clone member or from # the config. If there is no configuration key, or no value is # specified, use basebackup replica_methods = (clone_member.create_replica_methods if is_remote_master else self._postgresql.create_replica_methods) or ['basebackup'] if clone_member and clone_member.conn_url: r = clone_member.conn_kwargs(self._postgresql.config.replication) # add the credentials to connect to the replica origin to pgpass. 
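# For reference, a ~/.pgpass style file holds one colon-separated
# host:port:database:username:password entry per line, and libpq refuses it
# unless permissions are 0600; PGPASSFILE points libpq at it. A minimal
# sketch with hypothetical placeholder values (the real logic lives in
# ConfigHandler.write_pgpass and may differ):
import os as sketch_os
import stat as sketch_stat

def write_pgpass_sketch(path, host, port, user, password):
    with open(path, 'w') as f:
        f.write('{0}:{1}:*:{2}:{3}\n'.format(host, port, user, password))
    sketch_os.chmod(path, sketch_stat.S_IRUSR | sketch_stat.S_IWUSR)  # 0600
    env = sketch_os.environ.copy()
    env['PGPASSFILE'] = path
    return env

env_sketch = write_pgpass_sketch('/tmp/pgpass_demo', '10.0.0.1', 5432, 'replicator', 'secret')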
env = self._postgresql.config.write_pgpass(r) connstring = self._postgresql.config.format_dsn(r, True) else: connstring = '' env = os.environ.copy() # if we don't have any source, leave only replica methods that work without it replica_methods = [r for r in replica_methods if self._postgresql.replica_method_can_work_without_replication_connection(r)] # go through them in priority order ret = 1 for replica_method in replica_methods: if self._postgresql.cancellable.is_cancelled: break method_config = self._postgresql.replica_method_options(replica_method) # if the method is basebackup, then use the built-in if replica_method == "basebackup": ret = self.basebackup(connstring, env, method_config) if ret == 0: logger.info("replica has been created using basebackup") # if basebackup succeeds, exit with success break else: if not self._postgresql.data_directory_empty(): if method_config.get('keep_data', False): logger.info('Leaving data directory uncleaned') else: self._postgresql.remove_data_directory() cmd = replica_method # user-defined method; check for configuration # not required, actually if method_config: # look to see if the user has supplied a full command path # if not, use the method name as the command cmd = method_config.pop('command', cmd) # add the default parameters if not method_config.get('no_params', False): method_config.update({"scope": self._postgresql.scope, "role": "replica", "datadir": self._postgresql.data_dir, "connstring": connstring}) else: for param in ('no_params', 'no_master', 'keep_data'): method_config.pop(param, None) params = ["--{0}={1}".format(arg, val) for arg, val in method_config.items()] try: # call script with the full set of parameters ret = self._postgresql.cancellable.call(shlex.split(cmd) + params, env=env) # if we succeeded, stop if ret == 0: logger.info('replica has been created using %s', replica_method) break else: logger.error('Error creating replica using method %s: %s exited with code=%s', replica_method, cmd, ret) except Exception: logger.exception('Error creating replica using method %s', replica_method) ret = 1 self._postgresql.set_state('stopped') return ret def basebackup(self, conn_url, env, options): # creates a replica data dir using pg_basebackup. 
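
# --- Illustrative sketch (not part of Patroni's code): what a user-supplied
# --- create_replica method might look like. As shown above, Patroni appends
# --- --scope/--role/--datadir/--connstring to the configured command; the
# --- restore tool invoked below is a made-up placeholder.
import argparse
import subprocess

def custom_replica_method():
    parser = argparse.ArgumentParser()
    for name in ('scope', 'role', 'datadir', 'connstring'):
        parser.add_argument('--' + name)
    args = parser.parse_args()
    # exit code 0 tells Patroni the replica was created successfully
    return subprocess.call(['my-restore-tool', '--target-dir', args.datadir])

if __name__ == '__main__':
    raise SystemExit(custom_replica_method())
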
# this is the default, built-in create_replica_methods # tries twice, then returns failure (as 1) # uses "stream" as the xlog-method to avoid sync issues # supports additional user-supplied options, those are not validated maxfailures = 2 ret = 1 not_allowed_options = ('pgdata', 'format', 'wal-method', 'xlog-method', 'gzip', 'version', 'compress', 'dbname', 'host', 'port', 'username', 'password') user_options = self.process_user_options('basebackup', options, not_allowed_options, logger.error) for bbfailures in range(0, maxfailures): if self._postgresql.cancellable.is_cancelled: break if not self._postgresql.data_directory_empty(): self._postgresql.remove_data_directory() try: ret = self._postgresql.cancellable.call([self._postgresql.pgcommand('pg_basebackup'), '--pgdata=' + self._postgresql.data_dir, '-X', 'stream', '--dbname=' + conn_url] + user_options, env=env) if ret == 0: break else: logger.error('Error when fetching backup: pg_basebackup exited with code=%s', ret) except Exception as e: logger.error('Error when fetching backup with pg_basebackup: %s', e) if bbfailures < maxfailures - 1: logger.warning('Trying again in 5 seconds') time.sleep(5) return ret def clone(self, clone_member): """ - initialize the replica from an existing member (master or replica) - initialize the replica using the replica creation method that works without the replication connection (i.e. restore from on-disk base backup) """ ret = self.create_replica(clone_member) == 0 if ret: self._post_restore() return ret def bootstrap(self, config): """ Initialize a new node from scratch and start it. """ pg_hba = config.get('pg_hba', []) method = config.get('method') or 'initdb' if method != 'initdb' and method in config and 'command' in config[method]: self._keep_existing_recovery_conf = config[method].get('keep_existing_recovery_conf') self._running_custom_bootstrap = True do_initialize = self._custom_bootstrap else: method = 'initdb' do_initialize = self._initdb return do_initialize(config.get(method)) and self._postgresql.config.append_pg_hba(pg_hba) \ and self._postgresql.config.save_configuration_files() and self._postgresql.start() def create_or_update_role(self, name, password, options): options = list(map(str.upper, options)) if 'NOLOGIN' not in options and 'LOGIN' not in options: options.append('LOGIN') params = [name] if password: options.extend(['PASSWORD', '%s']) params.extend([password, password]) sql = """DO $$ BEGIN SET local synchronous_commit = 'local'; PERFORM * FROM pg_authid WHERE rolname = %s; IF FOUND THEN ALTER ROLE "{0}" WITH {1}; ELSE CREATE ROLE "{0}" WITH {1}; END IF; END;$$""".format(name, ' '.join(options)) self._postgresql.query('SET log_statement TO none') self._postgresql.query('SET log_min_duration_statement TO -1') self._postgresql.query("SET log_min_error_statement TO 'log'") try: self._postgresql.query(sql, *params) finally: self._postgresql.query('RESET log_min_error_statement') self._postgresql.query('RESET log_min_duration_statement') self._postgresql.query('RESET log_statement') def post_bootstrap(self, config, task): try: postgresql = self._postgresql superuser = postgresql.config.superuser if 'username' in superuser and 'password' in superuser: self.create_or_update_role(superuser['username'], superuser['password'], ['SUPERUSER']) task.complete(self.call_post_bootstrap(config)) if task.result: replication = postgresql.config.replication self.create_or_update_role(replication['username'], replication.get('password'), ['REPLICATION']) rewind = 
postgresql.config.rewind_credentials if not deep_compare(rewind, superuser): self.create_or_update_role(rewind['username'], rewind.get('password'), []) for f in ('pg_ls_dir(text, boolean, boolean)', 'pg_stat_file(text, boolean)', 'pg_read_binary_file(text)', 'pg_read_binary_file(text, bigint, bigint, boolean)'): sql = """DO $$ BEGIN SET local synchronous_commit = 'local'; GRANT EXECUTE ON function pg_catalog.{0} TO "{1}"; END;$$""".format(f, rewind['username']) postgresql.query(sql) for name, value in (config.get('users') or {}).items(): if all(name != a.get('username') for a in (superuser, replication, rewind)): self.create_or_update_role(name, value.get('password'), value.get('options', [])) # We were doing a custom bootstrap instead of running initdb, therefore we opened trust # access from certain addresses to be able to reach cluster and change password if self._running_custom_bootstrap: self._running_custom_bootstrap = False # If we don't have custom configuration for pg_hba.conf we need to restore original file if not postgresql.config.get('pg_hba'): os.unlink(postgresql.config.pg_hba_conf) postgresql.config.restore_configuration_files() postgresql.config.write_postgresql_conf() postgresql.config.replace_pg_ident() # at this point there should be no recovery.conf postgresql.config.remove_recovery_conf() if postgresql.config.hba_file and postgresql.config.hba_file != postgresql.config.pg_hba_conf: postgresql.restart() else: postgresql.config.replace_pg_hba() if postgresql.pending_restart: postgresql.restart() else: postgresql.reload() time.sleep(1) # give a time to postgres to "reload" configuration files postgresql.connection().close() # close connection to reconnect with a new password except Exception: logger.exception('post_bootstrap') task.complete(False) return task.result patroni-1.6.4/patroni/postgresql/callback_executor.py000066400000000000000000000017061361356115100231070ustar00rootroot00000000000000import logging from patroni.postgresql.cancellable import CancellableExecutor from threading import Condition, Thread logger = logging.getLogger(__name__) class CallbackExecutor(CancellableExecutor, Thread): def __init__(self): CancellableExecutor.__init__(self) Thread.__init__(self) self.daemon = True self._cmd = None self._condition = Condition() self.start() def call(self, cmd): self._kill_process() with self._condition: self._cmd = cmd self._condition.notify() def run(self): while True: with self._condition: if self._cmd is None: self._condition.wait() cmd, self._cmd = self._cmd, None with self._lock: if not self._start_process(cmd, close_fds=True): continue self._process.wait() self._kill_children() patroni-1.6.4/patroni/postgresql/cancellable.py000066400000000000000000000101441361356115100216560ustar00rootroot00000000000000import logging import os import psutil import subprocess from patroni.exceptions import PostgresException from patroni.utils import polling_loop from six import string_types from threading import Lock logger = logging.getLogger(__name__) class CancellableExecutor(object): def __init__(self): self._process = None self._process_cmd = None self._process_children = [] self._lock = Lock() def _start_process(self, cmd, *args, **kwargs): """This method must be executed only when the `_lock` is acquired""" try: self._process_children = [] self._process_cmd = cmd self._process = psutil.Popen(cmd, *args, **kwargs) except Exception: return logger.exception('Failed to execute %s', cmd) return True def _kill_process(self): with self._lock: if self._process is not 
None and self._process.is_running() and not self._process_children: try: self._process.suspend() # Suspend the process before getting list of childrens except psutil.Error as e: logger.info('Failed to suspend the process: %s', e.msg) try: self._process_children = self._process.children(recursive=True) except psutil.Error: pass try: self._process.kill() logger.warning('Killed %s because it was still running', self._process_cmd) except psutil.NoSuchProcess: pass except psutil.AccessDenied as e: logger.warning('Failed to kill the process: %s', e.msg) def _kill_children(self): waitlist = [] with self._lock: for child in self._process_children: try: child.kill() except psutil.NoSuchProcess: continue except psutil.AccessDenied as e: logger.info('Failed to kill child process: %s', e.msg) waitlist.append(child) psutil.wait_procs(waitlist) class CancellableSubprocess(CancellableExecutor): def __init__(self): super(CancellableSubprocess, self).__init__() self._is_cancelled = False def call(self, *args, **kwargs): for s in ('stdin', 'stdout', 'stderr'): kwargs.pop(s, None) communicate_input = 'communicate_input' in kwargs if communicate_input: input_data = kwargs.pop('communicate_input', None) if not isinstance(input_data, string_types): input_data = '' if input_data and input_data[-1] != '\n': input_data += '\n' kwargs['stdin'] = subprocess.PIPE kwargs['stdout'] = open(os.devnull, 'w') kwargs['stderr'] = subprocess.STDOUT try: with self._lock: if self._is_cancelled: raise PostgresException('cancelled') self._is_cancelled = False started = self._start_process(*args, **kwargs) if started: if communicate_input: if input_data: self._process.communicate(input_data) self._process.stdin.close() return self._process.wait() finally: with self._lock: self._process = None self._kill_children() def reset_is_cancelled(self): with self._lock: self._is_cancelled = False @property def is_cancelled(self): with self._lock: return self._is_cancelled def cancel(self): with self._lock: self._is_cancelled = True if self._process is None or not self._process.is_running(): return self._process.terminate() for _ in polling_loop(10): with self._lock: if self._process is None or not self._process.is_running(): return self._kill_process() patroni-1.6.4/patroni/postgresql/config.py000066400000000000000000001347141361356115100207100ustar00rootroot00000000000000import logging import os import re import shutil import socket import stat import time from patroni.exceptions import PatroniException from six.moves.urllib_parse import urlparse, parse_qsl, unquote from urllib3.response import HTTPHeaderDict from ..dcs import slot_name_from_member_name, RemoteMember from ..utils import compare_values, parse_bool, parse_int, split_host_port, uri, \ validate_directory, is_subpath logger = logging.getLogger(__name__) SYNC_STANDBY_NAME_RE = re.compile(r'^[A-Za-z_][A-Za-z_0-9\$]*$') PARAMETER_RE = re.compile(r'([a-z_]+)\s*=\s*') def quote_ident(value): """Very simplified version of quote_ident""" return value if SYNC_STANDBY_NAME_RE.match(value) else '"' + value + '"' def conninfo_uri_parse(dsn): ret = {} r = urlparse(dsn) if r.username: ret['user'] = r.username if r.password: ret['password'] = r.password if r.path[1:]: ret['dbname'] = r.path[1:] hosts = [] ports = [] for netloc in r.netloc.split('@')[-1].split(','): host = port = None if '[' in netloc and ']' in netloc: host = netloc.split(']')[0][1:] tmp = netloc.split(':', 1) if host is None: host = tmp[0] if len(tmp) == 2: host, port = tmp if host is not None: hosts.append(host) if port is 
not None: ports.append(port) if hosts: ret['host'] = ','.join(hosts) if ports: ret['port'] = ','.join(ports) ret = {name: unquote(value) for name, value in ret.items()} ret.update({name: value for name, value in parse_qsl(r.query)}) if ret.get('ssl') == 'true': del ret['ssl'] ret['sslmode'] = 'require' return ret def read_param_value(value): length = len(value) ret = '' is_quoted = value[0] == "'" i = int(is_quoted) while i < length: if is_quoted: if value[i] == "'": return ret, i + 1 elif value[i].isspace(): break if value[i] == '\\': i += 1 if i >= length: break ret += value[i] i += 1 return (None, None) if is_quoted else (ret, i) def conninfo_parse(dsn): ret = {} length = len(dsn) i = 0 while i < length: if dsn[i].isspace(): i += 1 continue param_match = PARAMETER_RE.match(dsn[i:]) if not param_match: return param = param_match.group(1) i += param_match.end() if i >= length: return value, end = read_param_value(dsn[i:]) if value is None: return i += end ret[param] = value return ret def parse_dsn(value): """ Very simple equivalent of `psycopg2.extensions.parse_dsn` introduced in 2.7.0. We are not using psycopg2 function in order to remain compatible with 2.5.4+. There is one minor difference though, this function removes `dbname` from the result and sets the sslmode` to `prefer` if it is not present in the connection string. This is necessary to simplify comparison of the old and the new values. >>> r = parse_dsn('postgresql://u%2Fse:pass@:%2f123,[%2Fhost2]/db%2Fsdf?application_name=mya%2Fpp&ssl=true') >>> r == {'application_name': 'mya/pp', 'host': ',/host2', 'sslmode': 'require',\ 'password': 'pass', 'port': '/123', 'user': 'u/se'} True >>> r = parse_dsn(" host = 'host' dbname = db\\\\ name requiressl=1 ") >>> r == {'host': 'host', 'sslmode': 'require'} True >>> parse_dsn('requiressl = 0\\\\') == {'sslmode': 'prefer'} True >>> parse_dsn("host=a foo = '") is None True >>> parse_dsn("host=a foo = ") is None True >>> parse_dsn("1") is None True """ if value.startswith('postgres://') or value.startswith('postgresql://'): ret = conninfo_uri_parse(value) else: ret = conninfo_parse(value) if ret: if 'sslmode' not in ret: # allow sslmode to take precedence over requiressl requiressl = ret.pop('requiressl', None) if requiressl == '1': ret['sslmode'] = 'require' elif requiressl is not None: ret['sslmode'] = 'prefer' ret.setdefault('sslmode', 'prefer') if 'dbname' in ret: del ret['dbname'] return ret def strip_comment(value): i = value.find('#') if i > -1: value = value[:i].strip() return value def read_recovery_param_value(value): """ >>> read_recovery_param_value('') is None True >>> read_recovery_param_value("'") is None True >>> read_recovery_param_value("''a") is None True >>> read_recovery_param_value('a b') is None True >>> read_recovery_param_value("'''") is None True >>> read_recovery_param_value("'\\\\") is None True >>> read_recovery_param_value("'a' s#") is None True >>> read_recovery_param_value("'\\\\'''' #a") "''" >>> read_recovery_param_value('asd') 'asd' """ value = value.strip() length = len(value) if length == 0: return None elif value[0] == "'": if length == 1: return None ret = '' i = 1 while i < length: if value[i] == '\\': i += 1 if i >= length: return None elif value[i] == "'": i += 1 if i >= length: break if value[i] in ('#', ' '): if strip_comment(value[i:]): return None break if value[i] != "'": return None ret += value[i] i += 1 else: return None return ret else: value = strip_comment(value) if not value or ' ' in value or '\\' in value: return None return value def 
mtime(filename): try: return os.stat(filename).st_mtime except OSError: return None class ConfigWriter(object): def __init__(self, filename): self._filename = filename self._fd = None def __enter__(self): self._fd = open(self._filename, 'w') self.writeline('# Do not edit this file manually!\n# It will be overwritten by Patroni!') return self def __exit__(self, exc_type, exc_val, exc_tb): if self._fd: self._fd.close() def writeline(self, line): self._fd.write(line) self._fd.write('\n') def writelines(self, lines): for line in lines: self.writeline(line) @staticmethod def escape(value): # Escape (by doubling) any single quotes or backslashes in given string return re.sub(r'([\'\\])', r'\1\1', str(value)) def write_param(self, param, value): self.writeline("{0} = '{1}'".format(param, self.escape(value))) class CaseInsensitiveDict(HTTPHeaderDict): def add(self, key, val): self[key] = val def __getitem__(self, key): return self._container[key.lower()][1] def __repr__(self): return str(dict(self.items())) def copy(self): return CaseInsensitiveDict(self._container.values()) class ConfigHandler(object): # List of parameters which must always be passed to postmaster as command line options # to make it not possible to change them with 'ALTER SYSTEM'. # Some of these parameters have sane default value assigned and Patroni doesn't allow # to decrease this value. E.g. 'wal_level' can't be lower than 'hot_standby' and so on. # These parameters could be changed only globally, i.e. via DCS. # P.S. 'listen_addresses' and 'port' are added here just for convenience, to mark them # as parameters which should always be passed through command line. # # Format: # key - parameter name # value - tuple(default_value, check_function, min_version) # default_value -- some sane default value # check_function -- if the new value is not correct must return `!False` # min_version -- major version of PostgreSQL when parameter was introduced CMDLINE_OPTIONS = CaseInsensitiveDict({ 'listen_addresses': (None, lambda _: False, 90100), 'port': (None, lambda _: False, 90100), 'cluster_name': (None, lambda _: False, 90500), 'wal_level': ('hot_standby', lambda v: v.lower() in ('hot_standby', 'replica', 'logical'), 90100), 'hot_standby': ('on', lambda _: False, 90100), 'max_connections': (100, lambda v: int(v) >= 25, 90100), 'max_wal_senders': (10, lambda v: int(v) >= 3, 90100), 'wal_keep_segments': (8, lambda v: int(v) >= 1, 90100), 'max_prepared_transactions': (0, lambda v: int(v) >= 0, 90100), 'max_locks_per_transaction': (64, lambda v: int(v) >= 32, 90100), 'track_commit_timestamp': ('off', lambda v: parse_bool(v) is not None, 90500), 'max_replication_slots': (10, lambda v: int(v) >= 4, 90400), 'max_worker_processes': (8, lambda v: int(v) >= 2, 90400), 'wal_log_hints': ('on', lambda _: False, 90400) }) _RECOVERY_PARAMETERS = { 'archive_cleanup_command', 'restore_command', 'recovery_end_command', 'recovery_target', 'recovery_target_name', 'recovery_target_time', 'recovery_target_xid', 'recovery_target_lsn', 'recovery_target_inclusive', 'recovery_target_timeline', 'recovery_target_action', 'recovery_min_apply_delay', 'primary_conninfo', 'primary_slot_name', 'promote_trigger_file', 'trigger_file' } def __init__(self, postgresql, config): self._postgresql = postgresql self._config_dir = os.path.abspath(config.get('config_dir') or postgresql.data_dir) config_base_name = config.get('config_base_name', 'postgresql') self._postgresql_conf = os.path.join(self._config_dir, config_base_name + '.conf') self._postgresql_conf_mtime = None
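
# --- Illustrative sketch (not part of Patroni's code): the quoting rule
# --- ConfigWriter.write_param applies above. Single quotes and backslashes
# --- are doubled and the value is wrapped in single quotes, which is how
# --- postgresql.conf expects string values to be escaped.
import re

def format_guc(param, value):
    escaped = re.sub(r"([\'\\])", r"\1\1", str(value))
    return "{0} = '{1}'".format(param, escaped)

# format_guc('archive_command', "test ! -f /wal's/%f") returns
# "archive_command = 'test ! -f /wal''s/%f'"
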
self._postgresql_base_conf_name = config_base_name + '.base.conf' self._postgresql_base_conf = os.path.join(self._config_dir, self._postgresql_base_conf_name) self._pg_hba_conf = os.path.join(self._config_dir, 'pg_hba.conf') self._pg_ident_conf = os.path.join(self._config_dir, 'pg_ident.conf') self._recovery_conf = os.path.join(postgresql.data_dir, 'recovery.conf') self._recovery_conf_mtime = None self._recovery_signal = os.path.join(postgresql.data_dir, 'recovery.signal') self._standby_signal = os.path.join(postgresql.data_dir, 'standby.signal') self._auto_conf = os.path.join(postgresql.data_dir, 'postgresql.auto.conf') self._auto_conf_mtime = None self._pgpass = os.path.abspath(config.get('pgpass') or os.path.join(os.path.expanduser('~'), 'pgpass')) if os.path.exists(self._pgpass) and not os.path.isfile(self._pgpass): raise PatroniException("'{}' exists and it's not a file, check your `postgresql.pgpass` configuration" .format(self._pgpass)) self._passfile = None self._passfile_mtime = None self._synchronous_standby_names = None self._postmaster_ctime = None self._current_recovery_params = None self._config = {} self._recovery_params = {} self.reload_config(config) def setup_server_parameters(self): self._server_parameters = self.get_server_parameters(self._config) self._adjust_recovery_parameters() def try_to_create_dir(self, d, msg): d = os.path.join(self._postgresql._data_dir, d) if (not is_subpath(self._postgresql._data_dir, d) or not self._postgresql.data_directory_empty()): validate_directory(d, msg) def check_directories(self): if "unix_socket_directories" in self._server_parameters: for d in self._server_parameters["unix_socket_directories"].split(","): self.try_to_create_dir(d.strip(), "'{}' is defined in unix_socket_directories, {}") if "stats_temp_directory" in self._server_parameters: self.try_to_create_dir(self._server_parameters["stats_temp_directory"], "'{}' is defined in stats_temp_directory, {}") self.try_to_create_dir(os.path.dirname(self._pgpass), "'{}' is defined in `postgresql.pgpass`, {}") @property def _configuration_to_save(self): configuration = [os.path.basename(self._postgresql_conf)] if 'custom_conf' not in self._config: configuration.append(os.path.basename(self._postgresql_base_conf_name)) if not self.hba_file: configuration.append('pg_hba.conf') if not self._server_parameters.get('ident_file'): configuration.append('pg_ident.conf') return configuration def save_configuration_files(self, check_custom_bootstrap=False): """ copy postgresql.conf to postgresql.conf.backup to be able to retrive configuration files - originally stored as symlinks, those are normally skipped by pg_basebackup - in case of WAL-E basebackup (see http://comments.gmane.org/gmane.comp.db.postgresql.wal-e/239) """ if not (check_custom_bootstrap and self._postgresql.bootstrap.running_custom_bootstrap): try: for f in self._configuration_to_save: config_file = os.path.join(self._config_dir, f) backup_file = os.path.join(self._postgresql.data_dir, f + '.backup') if os.path.isfile(config_file): shutil.copy(config_file, backup_file) except IOError: logger.exception('unable to create backup copies of configuration files') return True def restore_configuration_files(self): """ restore a previously saved postgresql.conf """ try: for f in self._configuration_to_save: config_file = os.path.join(self._config_dir, f) backup_file = os.path.join(self._postgresql.data_dir, f + '.backup') if not os.path.isfile(config_file): if os.path.isfile(backup_file): shutil.copy(backup_file, config_file) # Previously 
we didn't backup pg_ident.conf, if file is missing just create empty elif f == 'pg_ident.conf': open(config_file, 'w').close() except IOError: logger.exception('unable to restore configuration files from backup') def write_postgresql_conf(self, configuration=None): # rename the original configuration if it is necessary if 'custom_conf' not in self._config and not os.path.exists(self._postgresql_base_conf): os.rename(self._postgresql_conf, self._postgresql_base_conf) with ConfigWriter(self._postgresql_conf) as f: include = self._config.get('custom_conf') or self._postgresql_base_conf_name f.writeline("include '{0}'\n".format(ConfigWriter.escape(include))) for name, value in sorted((configuration or self._server_parameters).items()): if (not self._postgresql.bootstrap.running_custom_bootstrap or name != 'hba_file') \ and name not in self._RECOVERY_PARAMETERS: f.write_param(name, value) # when we are doing custom bootstrap we assume that we don't know superuser password # and in order to be able to change it, we are opening trust access from a certain address # therefore we need to make sure that hba_file is not overriden # after changing superuser password we will "revert" all these "changes" if self._postgresql.bootstrap.running_custom_bootstrap or 'hba_file' not in self._server_parameters: f.write_param('hba_file', self._pg_hba_conf) if 'ident_file' not in self._server_parameters: f.write_param('ident_file', self._pg_ident_conf) if self._postgresql.major_version >= 120000: if self._recovery_params: f.writeline('\n# recovery.conf') self._write_recovery_params(f, self._recovery_params) if not self._postgresql.bootstrap.keep_existing_recovery_conf: self._sanitize_auto_conf() def append_pg_hba(self, config): if not self.hba_file and not self._config.get('pg_hba'): with open(self._pg_hba_conf, 'a') as f: f.write('\n{}\n'.format('\n'.join(config))) return True def replace_pg_hba(self): """ Replace pg_hba.conf content in the PGDATA if hba_file is not defined in the `postgresql.parameters` and pg_hba is defined in `postgresql` configuration section. :returns: True if pg_hba.conf was rewritten. """ # when we are doing custom bootstrap we assume that we don't know superuser password # and in order to be able to change it, we are opening trust access from a certain address if self._postgresql.bootstrap.running_custom_bootstrap: addresses = {} if os.name == 'nt' else {'': 'local'} # windows doesn't yet support unix-domain sockets if 'host' in self.local_replication_address and not self.local_replication_address['host'].startswith('/'): addresses.update({sa[0] + '/32': 'host' for _, _, _, _, sa in socket.getaddrinfo( self.local_replication_address['host'], self.local_replication_address['port'], 0, socket.SOCK_STREAM, socket.IPPROTO_TCP)}) with ConfigWriter(self._pg_hba_conf) as f: for address, t in addresses.items(): f.writeline(( '{0}\treplication\t{1}\t{3}\ttrust\n' '{0}\tall\t{2}\t{3}\ttrust' ).format(t, self.replication['username'], self._superuser.get('username') or 'all', address)) elif not self.hba_file and self._config.get('pg_hba'): with ConfigWriter(self._pg_hba_conf) as f: f.writelines(self._config['pg_hba']) return True def replace_pg_ident(self): """ Replace pg_ident.conf content in the PGDATA if ident_file is not defined in the `postgresql.parameters` and pg_ident is defined in the `postgresql` section. :returns: True if pg_ident.conf was rewritten. 
""" if not self._server_parameters.get('ident_file') and self._config.get('pg_ident'): with ConfigWriter(self._pg_ident_conf) as f: f.writelines(self._config['pg_ident']) return True def primary_conninfo_params(self, member): if not (member and member.conn_url) or member.name == self._postgresql.name: return None ret = member.conn_kwargs(self.replication) ret['application_name'] = self._postgresql.name ret.setdefault('sslmode', 'prefer') if self._krbsrvname: ret['krbsrvname'] = self._krbsrvname if 'database' in ret: del ret['database'] return ret def format_dsn(self, params, include_dbname=False): # A list of keywords that can be found in a conninfo string. Follows what is acceptable by libpq keywords = ('dbname', 'user', 'passfile' if params.get('passfile') else 'password', 'host', 'port', 'sslmode', 'sslcompression', 'sslcert', 'sslkey', 'sslrootcert', 'sslcrl', 'application_name', 'krbsrvname') if include_dbname: params = params.copy() params['dbname'] = params.get('database') or self._postgresql.database # we are abusing information about the necessity of dbname # dsn should contain passfile or password only if there is no dbname in it (it is used in recovery.conf) skip = {'passfile', 'password'} else: skip = {'dbname'} def escape(value): return re.sub(r'([\'\\ ])', r'\\\1', str(value)) return ' '.join('{0}={1}'.format(kw, escape(params[kw])) for kw in keywords if kw not in skip and params.get(kw) is not None) def _write_recovery_params(self, fd, recovery_params): for name, value in sorted(recovery_params.items()): if name == 'primary_conninfo': if 'password' in value and self._postgresql.major_version >= 100000: self.write_pgpass(value) value['passfile'] = self._passfile = self._pgpass self._passfile_mtime = mtime(self._pgpass) value = self.format_dsn(value) fd.write_param(name, value) def build_recovery_params(self, member): recovery_params = CaseInsensitiveDict({p: v for p, v in self.get('recovery_conf', {}).items() if not p.lower().startswith('recovery_target') and p.lower() not in ('primary_conninfo', 'primary_slot_name')}) recovery_params.update({'standby_mode': 'on', 'recovery_target_timeline': 'latest'}) if self._postgresql.major_version >= 120000: # on pg12 we want to protect from following params being set in one of included files # not doing so might result in a standby being paused, promoted or shutted down. 
recovery_params.update({'recovery_target': '', 'recovery_target_name': '', 'recovery_target_time': '', 'recovery_target_xid': '', 'recovery_target_lsn': ''}) is_remote_master = isinstance(member, RemoteMember) primary_conninfo = self.primary_conninfo_params(member) if primary_conninfo: use_slots = self.get('use_slots', True) and self._postgresql.major_version >= 90400 if use_slots and not (is_remote_master and member.no_replication_slot): primary_slot_name = member.primary_slot_name if is_remote_master else self._postgresql.name recovery_params['primary_slot_name'] = slot_name_from_member_name(primary_slot_name) recovery_params['primary_conninfo'] = primary_conninfo # standby_cluster config might have different parameters, we want to override them standby_cluster_params = ['restore_command', 'archive_cleanup_command']\ + (['recovery_min_apply_delay'] if is_remote_master else []) recovery_params.update({p: member.data.get(p) for p in standby_cluster_params if member and member.data.get(p)}) return recovery_params def recovery_conf_exists(self): if self._postgresql.major_version >= 120000: return os.path.exists(self._standby_signal) or os.path.exists(self._recovery_signal) return os.path.exists(self._recovery_conf) @property def _triggerfile_good_name(self): return 'trigger_file' if self._postgresql.major_version < 120000 else 'promote_trigger_file' @property def _triggerfile_wrong_name(self): return 'trigger_file' if self._postgresql.major_version >= 120000 else 'promote_trigger_file' @property def _recovery_parameters_to_compare(self): skip_params = {'recovery_target_inclusive', 'recovery_target_action', self._triggerfile_wrong_name} return self._RECOVERY_PARAMETERS - skip_params def _read_recovery_params(self): pg_conf_mtime = mtime(self._postgresql_conf) auto_conf_mtime = mtime(self._auto_conf) passfile_mtime = mtime(self._passfile) if self._passfile else False postmaster_ctime = self._postgresql.is_running() if postmaster_ctime: postmaster_ctime = postmaster_ctime.create_time() if self._postgresql_conf_mtime == pg_conf_mtime and self._auto_conf_mtime == auto_conf_mtime \ and self._passfile_mtime == passfile_mtime and self._postmaster_ctime == postmaster_ctime: return None, False try: values = self._get_pg_settings(self._recovery_parameters_to_compare).values() values = {p[0]: [p[1], p[4] == 'postmaster', p[5]] for p in values} self._postgresql_conf_mtime = pg_conf_mtime self._auto_conf_mtime = auto_conf_mtime self._postmaster_ctime = postmaster_ctime except Exception: values = None return values, True def _read_recovery_params_pre_v12(self): recovery_conf_mtime = mtime(self._recovery_conf) passfile_mtime = mtime(self._passfile) if self._passfile else False if recovery_conf_mtime == self._recovery_conf_mtime and passfile_mtime == self._passfile_mtime: return None, False values = {} with open(self._recovery_conf, 'r') as f: for line in f: line = line.strip() if not line or line.startswith('#'): continue value = None match = PARAMETER_RE.match(line) if match: value = read_recovery_param_value(line[match.end():]) if value is None: return None, True values[match.group(1)] = [value, True] self._recovery_conf_mtime = recovery_conf_mtime values.setdefault('recovery_min_apply_delay', ['0', True]) values.update({param: ['', True] for param in self._recovery_parameters_to_compare if param not in values}) return values, True def _check_passfile(self, passfile, wanted_primary_conninfo): # If there is a passfile in the primary_conninfo try to figure out that # the passfile contains the line allowing 
connection to the given node. # We assume that the passfile was created by Patroni and therefore doing # the full match and not covering cases when host, port or user are set to '*' passfile_mtime = mtime(passfile) if passfile_mtime: try: with open(passfile) as f: wanted_line = self._pgpass_line(wanted_primary_conninfo).strip() for raw_line in f: if raw_line.strip() == wanted_line: self._passfile = passfile self._passfile_mtime = passfile_mtime return True except Exception: logger.info('Failed to read %s', passfile) return False def _check_primary_conninfo(self, primary_conninfo, wanted_primary_conninfo): # first we will cover corner cases, when we are replicating from somewhere while we shouldn't # or there is no primary_conninfo but we should replicate from some specific node. if not wanted_primary_conninfo: return not primary_conninfo elif not primary_conninfo: return False if 'passfile' in primary_conninfo and 'password' not in primary_conninfo \ and 'password' in wanted_primary_conninfo: if self._check_passfile(primary_conninfo['passfile'], wanted_primary_conninfo): primary_conninfo['password'] = wanted_primary_conninfo['password'] else: return False return all(primary_conninfo.get(p) == str(v) for p, v in wanted_primary_conninfo.items()) def check_recovery_conf(self, member): """Returns a tuple. The first boolean element indicates that recovery params don't match and the second is set to `True` if the restart is required in order to apply new values""" # TODO: recovery.conf could be stale, would be nice to detect that. if self._postgresql.major_version >= 120000: if not os.path.exists(self._standby_signal): return True, True _read_recovery_params = self._read_recovery_params else: if not self.recovery_conf_exists(): return True, True _read_recovery_params = self._read_recovery_params_pre_v12 params, updated = _read_recovery_params() # updated indicates that mtime of postgresql.conf, postgresql.auto.conf, or recovery.conf # was changed and params were read either from the config or from the database connection. if updated: if params is None: # exception or unparsable config return True, True # We will cache parsed value until the next config change. self._current_recovery_params = params primary_conninfo = params['primary_conninfo'] if primary_conninfo[0]: primary_conninfo[0] = parse_dsn(params['primary_conninfo'][0]) # If we failed to parse non-empty connection string this indicates that the config is broken. if not primary_conninfo[0]: return True, True else: # empty string, primary_conninfo is not in the config primary_conninfo[0] = {} required = {'restart': 0, 'reload': 0} def record_missmatch(mtype): required['restart' if mtype else 'reload'] += 1 wanted_recovery_params = self.build_recovery_params(member) for param, value in self._current_recovery_params.items(): # Skip certain parameters defined in the included postgres config files # if we know that they are not specified in the patroni configuration.
if len(value) > 2 and value[2] not in (self._postgresql_conf, self._auto_conf) and \ param in ('archive_cleanup_command', 'promote_trigger_file', 'recovery_end_command', 'recovery_min_apply_delay', 'restore_command') and param not in wanted_recovery_params: continue if param == 'recovery_min_apply_delay': if not compare_values('integer', 'ms', value[0], wanted_recovery_params.get(param, 0)): record_missmatch(value[1]) elif param == 'primary_conninfo': if not self._check_primary_conninfo(value[0], wanted_recovery_params.get('primary_conninfo', {})): record_missmatch(value[1]) elif (param != 'primary_slot_name' or wanted_recovery_params.get('primary_conninfo')) \ and str(value[0]) != str(wanted_recovery_params.get(param, '')): record_missmatch(value[1]) return required['restart'] + required['reload'] > 0, required['restart'] > 0 @staticmethod def _remove_file_if_exists(name): if os.path.isfile(name) or os.path.islink(name): os.unlink(name) @staticmethod def _pgpass_line(record): if 'password' in record: def escape(value): return re.sub(r'([:\\])', r'\\\1', str(value)) record = {n: escape(record.get(n, '*')) for n in ('host', 'port', 'user', 'password')} return '{host}:{port}:*:{user}:{password}'.format(**record) def write_pgpass(self, record): line = self._pgpass_line(record) if not line: return os.environ.copy() with open(self._pgpass, 'w') as f: os.chmod(self._pgpass, stat.S_IWRITE | stat.S_IREAD) f.write(line) env = os.environ.copy() env['PGPASSFILE'] = self._pgpass return env def write_recovery_conf(self, recovery_params): if self._postgresql.major_version >= 120000: if parse_bool(recovery_params.pop('standby_mode', None)): open(self._standby_signal, 'w').close() else: self._remove_file_if_exists(self._standby_signal) open(self._recovery_signal, 'w').close() self._recovery_params = recovery_params else: with ConfigWriter(self._recovery_conf) as f: os.chmod(self._recovery_conf, stat.S_IWRITE | stat.S_IREAD) self._write_recovery_params(f, recovery_params) def remove_recovery_conf(self): for name in (self._recovery_conf, self._standby_signal, self._recovery_signal): self._remove_file_if_exists(name) self._recovery_params = {} def _sanitize_auto_conf(self): overwrite = False lines = [] if os.path.exists(self._auto_conf): try: with open(self._auto_conf) as f: for raw_line in f: line = raw_line.strip() match = PARAMETER_RE.match(line) if match and match.group(1).lower() in self._RECOVERY_PARAMETERS: overwrite = True else: lines.append(raw_line) except Exception: logger.info('Failed to read %s', self._auto_conf) if overwrite: try: with open(self._auto_conf, 'w') as f: for raw_line in lines: f.write(raw_line) except Exception: logger.exception('Failed to remove some unwanted parameters from %s', self._auto_conf) def _adjust_recovery_parameters(self): # It is not strictly necessary, but we can make patroni configs crossi-compatible with all postgres versions. 
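
# --- Illustrative sketch (not part of Patroni's code): the cross-version
# --- adjustment described in the comment above. PostgreSQL 12 renamed
# --- trigger_file to promote_trigger_file, so a value given under the name
# --- that is wrong for the running major version is moved to the right one.
def adjust_trigger_file(recovery_conf, major_version):
    good = 'promote_trigger_file' if major_version >= 120000 else 'trigger_file'
    bad = 'trigger_file' if good == 'promote_trigger_file' else 'promote_trigger_file'
    value = recovery_conf.pop(bad, None)
    if value and good not in recovery_conf:
        recovery_conf[good] = value
    return recovery_conf

# adjust_trigger_file({'trigger_file': '/tmp/promote'}, 120000)
# returns {'promote_trigger_file': '/tmp/promote'}
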
recovery_conf = {n: v for n, v in self._server_parameters.items() if n.lower() in self._RECOVERY_PARAMETERS} if recovery_conf: self._config['recovery_conf'] = recovery_conf if self.get('recovery_conf'): value = self._config['recovery_conf'].pop(self._triggerfile_wrong_name, None) if self._triggerfile_good_name not in self._config['recovery_conf'] and value: self._config['recovery_conf'][self._triggerfile_good_name] = value def get_server_parameters(self, config): parameters = config['parameters'].copy() listen_addresses, port = split_host_port(config['listen'], 5432) parameters.update(cluster_name=self._postgresql.scope, listen_addresses=listen_addresses, port=str(port)) if config.get('synchronous_mode', False): if self._synchronous_standby_names is None: if config.get('synchronous_mode_strict', False): parameters['synchronous_standby_names'] = '*' else: parameters.pop('synchronous_standby_names', None) else: parameters['synchronous_standby_names'] = self._synchronous_standby_names if self._postgresql.major_version >= 90600 and parameters['wal_level'] == 'hot_standby': parameters['wal_level'] = 'replica' ret = CaseInsensitiveDict({k: v for k, v in parameters.items() if not self._postgresql.major_version or self._postgresql.major_version >= self.CMDLINE_OPTIONS.get(k, (0, 1, 90100))[2]}) ret.update({k: os.path.join(self._config_dir, ret[k]) for k in ('hba_file', 'ident_file') if k in ret}) return ret @staticmethod def _get_unix_local_address(unix_socket_directories): for d in unix_socket_directories.split(','): d = d.strip() if d.startswith('/'): # Only absolute path can be used to connect via unix-socket return d return '' def _get_tcp_local_address(self): listen_addresses = self._server_parameters['listen_addresses'].split(',') for la in listen_addresses: if la.strip().lower() in ('*', '0.0.0.0', '127.0.0.1', 'localhost'): # we are listening on '*' or localhost return 'localhost' # connection via localhost is preferred return listen_addresses[0].strip() # can't use localhost, take first address from listen_addresses @property def local_connect_kwargs(self): ret = self._local_address.copy() # add all of the other connection settings that are available ret.update(self._superuser) # if the "username" parameter is present, it actually needs to be "user" # for connecting to PostgreSQL if 'username' in self._superuser: ret['user'] = self._superuser['username'] del ret['username'] # ensure certain Patroni configurations are available ret.update({'database': self._postgresql.database, 'fallback_application_name': 'Patroni', 'connect_timeout': 3, 'options': '-c statement_timeout=2000'}) return ret def resolve_connection_addresses(self): port = self._server_parameters['port'] tcp_local_address = self._get_tcp_local_address() local_address = {'port': port} if self._config.get('use_unix_socket'): unix_socket_directories = self._server_parameters.get('unix_socket_directories') if unix_socket_directories is not None: # fallback to tcp if unix_socket_directories is set, but there are no sutable values local_address['host'] = self._get_unix_local_address(unix_socket_directories) or tcp_local_address # if unix_socket_directories is not specified, but use_unix_socket is set to true - do our best # to use default value, i.e. 
don't specify a host neither in connection url nor arguments else: local_address['host'] = tcp_local_address self._local_address = local_address self.local_replication_address = {'host': tcp_local_address, 'port': port} netloc = self._config.get('connect_address') or tcp_local_address + ':' + port self._postgresql.connection_string = uri('postgres', netloc, self._postgresql.database) self._postgresql.set_connection_kwargs(self.local_connect_kwargs) def _get_pg_settings(self, names): return {r[0]: r for r in self._postgresql.query(('SELECT name, setting, unit, vartype, context, sourcefile' + ' FROM pg_catalog.pg_settings ' + ' WHERE pg_catalog.lower(name) = ANY(%s)'), [n.lower() for n in names])} @staticmethod def _handle_wal_buffers(old_values, changes): wal_block_size = parse_int(old_values['wal_block_size'][1]) wal_segment_size = old_values['wal_segment_size'] wal_segment_unit = parse_int(wal_segment_size[2], 'B') if wal_segment_size[2][0].isdigit() else 1 wal_segment_size = parse_int(wal_segment_size[1]) * wal_segment_unit / wal_block_size default_wal_buffers = min(max(parse_int(old_values['shared_buffers'][1]) / 32, 8), wal_segment_size) wal_buffers = old_values['wal_buffers'] new_value = str(changes['wal_buffers'] or -1) new_value = default_wal_buffers if new_value == '-1' else parse_int(new_value, wal_buffers[2]) old_value = default_wal_buffers if wal_buffers[1] == '-1' else parse_int(*wal_buffers[1:3]) if new_value == old_value: del changes['wal_buffers'] def reload_config(self, config, sighup=False): self._superuser = config['authentication'].get('superuser', {}) server_parameters = self.get_server_parameters(config) conf_changed = hba_changed = ident_changed = local_connection_address_changed = pending_restart = False if self._postgresql.state == 'running': changes = CaseInsensitiveDict({p: v for p, v in server_parameters.items() if p.lower() not in self._RECOVERY_PARAMETERS}) changes.update({p: None for p in self._server_parameters.keys() if not (p in changes or p.lower() in self._RECOVERY_PARAMETERS)}) if changes: if 'wal_buffers' in changes: # we need to calculate the default value of wal_buffers undef = [p for p in ('shared_buffers', 'wal_segment_size', 'wal_block_size') if p not in changes] changes.update({p: None for p in undef}) # XXX: query can raise an exception old_values = self._get_pg_settings(changes.keys()) if 'wal_buffers' in changes: self._handle_wal_buffers(old_values, changes) for p in undef: del changes[p] for r in old_values.values(): if r[4] != 'internal' and r[0] in changes: new_value = changes.pop(r[0]) if new_value is None or not compare_values(r[3], r[2], r[1], new_value): conf_changed = True if r[4] == 'postmaster': pending_restart = True logger.info('Changed %s from %s to %s (restart might be required)', r[0], r[1], new_value) if config.get('use_unix_socket') and r[0] == 'unix_socket_directories'\ or r[0] in ('listen_addresses', 'port'): local_connection_address_changed = True else: logger.info('Changed %s from %s to %s', r[0], r[1], new_value) for param, value in changes.items(): if '.' 
in param: # Check that user-defined parameters have changed (parameters with period in name) if value is None or param not in self._server_parameters \ or str(value) != str(self._server_parameters[param]): logger.info('Changed %s from %s to %s', param, self._server_parameters.get(param), value) conf_changed = True elif param in server_parameters: logger.warning('Removing invalid parameter `%s` from postgresql.parameters', param) server_parameters.pop(param) if not server_parameters.get('hba_file') and config.get('pg_hba'): hba_changed = self._config.get('pg_hba', []) != config['pg_hba'] if not server_parameters.get('ident_file') and config.get('pg_ident'): ident_changed = self._config.get('pg_ident', []) != config['pg_ident'] self._config = config self._postgresql.set_pending_restart(pending_restart) self._server_parameters = server_parameters self._adjust_recovery_parameters() self._krbsrvname = config.get('krbsrvname') # for not so obvious connection attempts that may happen outside of psycopg2 if self._krbsrvname: os.environ['PGKRBSRVNAME'] = self._krbsrvname if not local_connection_address_changed: self.resolve_connection_addresses() if conf_changed: self.write_postgresql_conf() if hba_changed: self.replace_pg_hba() if ident_changed: self.replace_pg_ident() if sighup or conf_changed or hba_changed or ident_changed: logger.info('Reloading PostgreSQL configuration.') self._postgresql.reload() if self._postgresql.major_version >= 90500: time.sleep(1) try: pending_restart = self._postgresql.query('SELECT COUNT(*) FROM pg_catalog.pg_settings' ' WHERE pending_restart').fetchone()[0] > 0 self._postgresql.set_pending_restart(pending_restart) except Exception as e: logger.warning('Exception %r when running query', e) else: logger.info('No PostgreSQL configuration items changed, nothing to reload.') def set_synchronous_standby(self, name): """Sets a node to be synchronous standby and if changed does a reload for PostgreSQL.""" if name and name != '*': name = quote_ident(name) if name != self._synchronous_standby_names: if name is None: self._server_parameters.pop('synchronous_standby_names', None) else: self._server_parameters['synchronous_standby_names'] = name self._synchronous_standby_names = name if self._postgresql.state == 'running': self.write_postgresql_conf() self._postgresql.reload() @property def effective_configuration(self): """It might happen that the current value of one (or more) below parameters stored in the controldata is higher than the value stored in the global cluster configuration. Example: max_connections in global configuration is 100, but in controldata `Current max_connections setting: 200`. If we try to start postgres with max_connections=100, it will immediately exit.
As a workaround we will start it with the values from controldata and set `pending_restart` to true as an indicator that current values of parameters are not matching expectations.""" if self._postgresql.role == 'master': return self._server_parameters options_mapping = { 'max_connections': 'max_connections setting', 'max_prepared_transactions': 'max_prepared_xacts setting', 'max_locks_per_transaction': 'max_locks_per_xact setting' } if self._postgresql.major_version >= 90400: options_mapping['max_worker_processes'] = 'max_worker_processes setting' if self._postgresql.major_version >= 120000: options_mapping['max_wal_senders'] = 'max_wal_senders setting' data = self._postgresql.controldata() effective_configuration = self._server_parameters.copy() for name, cname in options_mapping.items(): value = parse_int(effective_configuration[name]) cvalue = parse_int(data[cname]) if cvalue > value: effective_configuration[name] = cvalue self._postgresql.set_pending_restart(True) return effective_configuration @property def replication(self): return self._config['authentication']['replication'] @property def superuser(self): return self._superuser @property def rewind_credentials(self): return self._config['authentication'].get('rewind', self._superuser) \ if self._postgresql.major_version >= 110000 else self._superuser @property def hba_file(self): return self._server_parameters.get('hba_file') @property def pg_hba_conf(self): return self._pg_hba_conf @property def postgresql_conf(self): return self._postgresql_conf def get(self, key, default=None): return self._config.get(key, default) patroni-1.6.4/patroni/postgresql/connection.py000066400000000000000000000026671361356115100216030ustar00rootroot00000000000000import logging import psycopg2 from contextlib import contextmanager from threading import Lock logger = logging.getLogger(__name__) class Connection(object): def __init__(self): self._lock = Lock() self._connection = None self._cursor_holder = None def set_conn_kwargs(self, conn_kwargs): self._conn_kwargs = conn_kwargs def get(self): with self._lock: if not self._connection or self._connection.closed != 0: self._connection = psycopg2.connect(**self._conn_kwargs) self._connection.autocommit = True self.server_version = self._connection.server_version return self._connection def cursor(self): if not self._cursor_holder or self._cursor_holder.closed or self._cursor_holder.connection.closed != 0: logger.info("establishing a new patroni connection to the postgres cluster") self._cursor_holder = self.get().cursor() return self._cursor_holder def close(self): if self._connection and self._connection.closed == 0: self._connection.close() logger.info("closed patroni connection to the postgresql cluster") self._cursor_holder = self._connection = None @contextmanager def get_connection_cursor(**kwargs): with psycopg2.connect(**kwargs) as conn: conn.autocommit = True with conn.cursor() as cur: yield cur patroni-1.6.4/patroni/postgresql/misc.py000066400000000000000000000043711361356115100203710ustar00rootroot00000000000000import logging from patroni.exceptions import PostgresException logger = logging.getLogger(__name__) def postgres_version_to_int(pg_version): """Convert the server_version to integer >>> postgres_version_to_int('9.5.3') 90503 >>> postgres_version_to_int('9.3.13') 90313 >>> postgres_version_to_int('10.1') 100001 >>> postgres_version_to_int('10') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... 
PostgresException: 'Invalid PostgreSQL version format: X.Y or X.Y.Z is accepted: 10' >>> postgres_version_to_int('9.6') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... PostgresException: 'Invalid PostgreSQL version format: X.Y or X.Y.Z is accepted: 9.6' >>> postgres_version_to_int('a.b.c') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... PostgresException: 'Invalid PostgreSQL version: a.b.c' """ try: components = list(map(int, pg_version.split('.'))) except ValueError: raise PostgresException('Invalid PostgreSQL version: {0}'.format(pg_version)) if len(components) < 2 or len(components) == 2 and components[0] < 10 or len(components) > 3: raise PostgresException('Invalid PostgreSQL version format: X.Y or X.Y.Z is accepted: {0}'.format(pg_version)) if len(components) == 2: # new style version numbers, i.e. 10.1 becomes 100001 components.insert(1, 0) return int(''.join('{0:02d}'.format(c) for c in components)) def postgres_major_version_to_int(pg_version): """ >>> postgres_major_version_to_int('10') 100000 >>> postgres_major_version_to_int('9.6') 90600 """ return postgres_version_to_int(pg_version + '.0') def parse_lsn(lsn): t = lsn.split('/') return int(t[0], 16) * 0x100000000 + int(t[1], 16) def parse_history(data): for line in data.split('\n'): values = line.strip().split('\t') if len(values) == 3: try: values[0] = int(values[0]) values[1] = parse_lsn(values[1]) yield values except (IndexError, ValueError): logger.exception('Exception when parsing timeline history line "%s"', values) patroni-1.6.4/patroni/postgresql/postmaster.py000066400000000000000000000217251361356115100216410ustar00rootroot00000000000000import logging import multiprocessing import os import psutil import re import signal import subprocess import sys from patroni import PATRONI_ENV_PREFIX # avoid spawning the resource tracker process if sys.version_info >= (3, 8): # pragma: no cover import multiprocessing.resource_tracker multiprocessing.resource_tracker.getfd = lambda: 0 elif sys.version_info >= (3, 4): # pragma: no cover import multiprocessing.semaphore_tracker multiprocessing.semaphore_tracker.getfd = lambda: 0 logger = logging.getLogger(__name__) STOP_SIGNALS = { 'smart': 'TERM', 'fast': 'INT', 'immediate': 'QUIT', } def pg_ctl_start(conn, cmdline, env): if os.name != 'nt': os.setsid() try: postmaster = subprocess.Popen(cmdline, close_fds=True, env=env) conn.send(postmaster.pid) except Exception: logger.exception('Failed to execute %s', cmdline) conn.send(None) conn.close() class PostmasterProcess(psutil.Process): def __init__(self, pid): self.is_single_user = False if pid < 0: pid = -pid self.is_single_user = True super(PostmasterProcess, self).__init__(pid) @staticmethod def _read_postmaster_pidfile(data_dir): """Reads and parses postmaster.pid from the data directory :returns dictionary of values if successful, empty dictionary otherwise """ pid_line_names = ['pid', 'data_dir', 'start_time', 'port', 'socket_dir', 'listen_addr', 'shmem_key'] try: with open(os.path.join(data_dir, 'postmaster.pid')) as f: return {name: line.rstrip('\n') for name, line in zip(pid_line_names, f)} except IOError: return {} def _is_postmaster_process(self): try: start_time = int(self._postmaster_pid.get('start_time', 0)) if start_time and abs(self.create_time() - start_time) > 3: logger.info('Process %s is not postmaster, too much difference between PID file start time %s and ' 'process start time %s', self.pid, self.create_time(), start_time) return False except ValueError:
logger.warning('Garbage start time value in pid file: %r', self._postmaster_pid.get('start_time')) # Extra safety check. The process can't be ourselves, our parent or our direct child. if self.pid == os.getpid() or self.pid == os.getppid() or self.ppid() == os.getpid(): logger.info('Patroni (pid=%s, ppid=%s), "fake postmaster" (pid=%s, ppid=%s)', os.getpid(), os.getppid(), self.pid, self.ppid()) return False return True @classmethod def _from_pidfile(cls, data_dir): postmaster_pid = PostmasterProcess._read_postmaster_pidfile(data_dir) try: pid = int(postmaster_pid.get('pid', 0)) if pid: proc = cls(pid) proc._postmaster_pid = postmaster_pid return proc except ValueError: pass @staticmethod def from_pidfile(data_dir): try: proc = PostmasterProcess._from_pidfile(data_dir) return proc if proc and proc._is_postmaster_process() else None except psutil.NoSuchProcess: return None @classmethod def from_pid(cls, pid): try: return cls(pid) except psutil.NoSuchProcess: return None def signal_stop(self, mode, pg_ctl='pg_ctl'): """Signal postmaster process to stop :returns None if signaled, True if process is already gone, False if error """ if self.is_single_user: logger.warning("Cannot stop server; single-user server is running (PID: {0})".format(self.pid)) return False if os.name != 'posix': return self.pg_ctl_kill(mode, pg_ctl) try: self.send_signal(getattr(signal, 'SIG' + STOP_SIGNALS[mode])) except psutil.NoSuchProcess: return True except psutil.AccessDenied as e: logger.warning("Could not send stop signal to PostgreSQL (error: {0})".format(e)) return False return None def pg_ctl_kill(self, mode, pg_ctl): try: status = subprocess.call([pg_ctl, "kill", STOP_SIGNALS[mode], str(self.pid)]) except OSError: return False if status == 0: return None else: return not self.is_running() def wait_for_user_backends_to_close(self): # These regexps are cross checked against versions PostgreSQL 9.1 .. 11 aux_proc_re = re.compile("(?:postgres:)( .*:)? (?:(?:archiver|startup|autovacuum launcher|autovacuum worker|" "checkpointer|logger|stats collector|wal receiver|wal writer|writer)(?: process )?|" "walreceiver|wal sender process|walsender|walwriter|background writer|" "logical replication launcher|logical replication worker for|bgworker:) ") try: children = self.children() except psutil.Error: return logger.debug('Failed to get list of postmaster children') user_backends = [] user_backends_cmdlines = [] for child in children: try: cmdline = child.cmdline()[0] if not aux_proc_re.match(cmdline): user_backends.append(child) user_backends_cmdlines.append(cmdline) except psutil.NoSuchProcess: pass if user_backends: logger.debug('Waiting for user backends %s to close', ', '.join(user_backends_cmdlines)) psutil.wait_procs(user_backends) logger.debug("Backends closed") @staticmethod def start(pgcommand, data_dir, conf, options): # Unfortunately `pg_ctl start` does not return postmaster pid to us. Without this information # it is hard to know the current state of postgres startup, so we had to reimplement pg_ctl start # in python. It will start postgres, wait for port to be open and wait until postgres will start # accepting connections. # Important!!! We can't just start postgres using subprocess.Popen, because in this case it # will be our child for the rest of our live and we will have to take care of it (`waitpid`). # So we will use the same approach as pg_ctl uses: start a new process, which will start postgres. # This process will write postmaster pid to stdout and exit immediately. 
Now it's responsibility # of init process to take care about postmaster. # In order to make everything portable we can't use fork&exec approach here, so we will call # ourselves and pass list of arguments which must be used to start postgres. # On Windows, in order to run a side-by-side assembly the specified env must include a valid SYSTEMROOT. env = {p: os.environ[p] for p in os.environ if not p.startswith(PATRONI_ENV_PREFIX)} try: proc = PostmasterProcess._from_pidfile(data_dir) if proc and not proc._is_postmaster_process(): # Upon start postmaster process performs various safety checks if there is a postmaster.pid # file in the data directory. Although Patroni already detected that the running process # corresponding to the postmaster.pid is not a postmaster, the new postmaster might fail # to start, because it thinks that postmaster.pid is already locked. # Important!!! Unlink of postmaster.pid isn't an option, because it has a lot of nasty race conditions. # Luckily there is a workaround to this problem, we can pass the pid from postmaster.pid # in the `PG_GRANDPARENT_PID` environment variable and postmaster will ignore it. logger.info("Telling pg_ctl that it is safe to ignore postmaster.pid for process %s", proc.pid) env['PG_GRANDPARENT_PID'] = str(proc.pid) except psutil.NoSuchProcess: pass cmdline = [pgcommand, '-D', data_dir, '--config-file={}'.format(conf)] + options logger.debug("Starting postgres: %s", " ".join(cmdline)) ctx = multiprocessing.get_context('spawn') if sys.version_info >= (3, 4) else multiprocessing parent_conn, child_conn = ctx.Pipe(False) proc = ctx.Process(target=pg_ctl_start, args=(child_conn, cmdline, env)) proc.start() pid = parent_conn.recv() proc.join() if pid is None: return logger.info('postmaster pid=%s', pid) # TODO: In an extremely unlikely case, the process could have exited and the pid reassigned. The start # initiation time is not accurate enough to compare to create time as start time would also likely # be relatively close. We need the subprocess extract pid+start_time in a race free manner. 
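# --- Illustrative aside (not part of Patroni): the detach-and-report pattern used by start() above,
# reduced to a minimal standalone sketch. `start_detached` and `_spawn_and_report` are hypothetical
# names; only the stdlib is assumed. The intermediate process Popen()s the target, sends the pid back
# over a Pipe and exits immediately, so the daemon is reparented to init and never becomes a child
# the caller would have to waitpid() on.
import multiprocessing
import subprocess

def _spawn_and_report(conn, cmdline):
    # start the target program, hand its pid back to the parent, then exit right away
    proc = subprocess.Popen(cmdline, close_fds=True)
    conn.send(proc.pid)
    conn.close()

def start_detached(cmdline):
    ctx = multiprocessing.get_context('spawn')
    parent_conn, child_conn = ctx.Pipe(False)  # one-way pipe: parent receives, child sends
    starter = ctx.Process(target=_spawn_and_report, args=(child_conn, cmdline))
    starter.start()
    pid = parent_conn.recv()  # pid of the grandchild (the daemon itself)
    starter.join()  # the intermediate process has already exited
    return pid
# --- end of aside ---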
return PostmasterProcess.from_pid(pid) patroni-1.6.4/patroni/postgresql/rewind.py000066400000000000000000000223621361356115100207260ustar00rootroot00000000000000import logging import os import subprocess from patroni.dcs import Leader from patroni.postgresql.connection import get_connection_cursor from patroni.postgresql.misc import parse_history, parse_lsn logger = logging.getLogger(__name__) REWIND_STATUS = type('Enum', (), {'INITIAL': 0, 'CHECKPOINT': 1, 'CHECK': 2, 'NEED': 3, 'NOT_NEED': 4, 'SUCCESS': 5, 'FAILED': 6}) class Rewind(object): def __init__(self, postgresql): self._postgresql = postgresql self.reset_state() @staticmethod def configuration_allows_rewind(data): return data.get('wal_log_hints setting', 'off') == 'on' or data.get('Data page checksum version', '0') != '0' @property def can_rewind(self): """ check if pg_rewind executable is there and that pg_controldata indicates we have either wal_log_hints or checksums turned on """ # low-hanging fruit: check if pg_rewind configuration is there if not self._postgresql.config.get('use_pg_rewind'): return False cmd = [self._postgresql.pgcommand('pg_rewind'), '--help'] try: ret = subprocess.call(cmd, stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT) if ret != 0: # pg_rewind is not there, close up the shop and go home return False except OSError: return False return self.configuration_allows_rewind(self._postgresql.controldata()) @property def can_rewind_or_reinitialize_allowed(self): return self._postgresql.config.get('remove_data_directory_on_diverged_timelines') or self.can_rewind def trigger_check_diverged_lsn(self): if self.can_rewind_or_reinitialize_allowed and self._state != REWIND_STATUS.NEED: self._state = REWIND_STATUS.CHECK def check_leader_is_not_in_recovery(self, **kwargs): if not kwargs.get('database'): kwargs['database'] = self._postgresql.database try: with get_connection_cursor(connect_timeout=3, options='-c statement_timeout=2000', **kwargs) as cur: cur.execute('SELECT pg_catalog.pg_is_in_recovery()') if not cur.fetchone()[0]: return True logger.info('Leader is still in_recovery and therefore can\'t be used for rewind') except Exception: return logger.exception('Exception when working with leader') def _get_local_timeline_lsn_from_controldata(self): timeline = lsn = None data = self._postgresql.controldata() try: if data.get('Database cluster state') == 'shut down in recovery': lsn = data.get('Minimum recovery ending location') timeline = int(data.get("Min recovery ending loc's timeline")) if lsn == '0/0' or timeline == 0: # it was a master when it crashed data['Database cluster state'] = 'shut down' if data.get('Database cluster state') == 'shut down': lsn = data.get('Latest checkpoint location') timeline = int(data.get("Latest checkpoint's TimeLineID")) except (TypeError, ValueError): logger.exception('Failed to get local timeline and lsn from pg_controldata output') return timeline, lsn def _get_local_timeline_lsn(self): if self._postgresql.is_running(): # if postgres is running - get timeline and lsn from replication connection timeline, lsn = self._postgresql.get_local_timeline_lsn_from_replication_connection() else: # otherwise analyze pg_controldata output timeline, lsn = self._get_local_timeline_lsn_from_controldata() logger.info('Local timeline=%s lsn=%s', timeline, lsn) return timeline, lsn def _check_timeline_and_lsn(self, leader): local_timeline, local_lsn = self._get_local_timeline_lsn() if local_timeline is None or local_lsn is None: return if isinstance(leader, Leader): if 
leader.member.data.get('role') != 'master': return # standby cluster elif not self.check_leader_is_not_in_recovery(**leader.conn_kwargs(self._postgresql.config.replication)): return history = need_rewind = None try: with self._postgresql.get_replication_connection_cursor(**leader.conn_kwargs()) as cur: cur.execute('IDENTIFY_SYSTEM') master_timeline = cur.fetchone()[1] logger.info('master_timeline=%s', master_timeline) if local_timeline > master_timeline: # Not always supported by pg_rewind need_rewind = True elif master_timeline > 1: cur.execute('TIMELINE_HISTORY %s', (master_timeline,)) history = bytes(cur.fetchone()[1]).decode('utf-8') logger.info('master: history=%s', history) else: # local_timeline == master_timeline == 1 need_rewind = False except Exception: return logger.exception('Exception when working with master via replication connection') if history is not None: for parent_timeline, switchpoint, _ in parse_history(history): if parent_timeline == local_timeline: try: need_rewind = parse_lsn(local_lsn) >= switchpoint except (IndexError, ValueError): logger.exception('Exception when parsing lsn') break elif parent_timeline > local_timeline: break self._state = need_rewind and REWIND_STATUS.NEED or REWIND_STATUS.NOT_NEED def rewind_or_reinitialize_needed_and_possible(self, leader): if leader and leader.name != self._postgresql.name and leader.conn_url and self._state == REWIND_STATUS.CHECK: self._check_timeline_and_lsn(leader) return leader and leader.conn_url and self._state == REWIND_STATUS.NEED def check_for_checkpoint_after_promote(self): if self._state == REWIND_STATUS.INITIAL and self._postgresql.is_leader() and \ self._postgresql.get_master_timeline() == self._postgresql.pg_control_timeline(): self._state = REWIND_STATUS.CHECKPOINT def checkpoint_after_promote(self): return self._state == REWIND_STATUS.CHECKPOINT def pg_rewind(self, r): # prepare pg_rewind connection env = self._postgresql.config.write_pgpass(r) env['PGOPTIONS'] = '-c statement_timeout=0' dsn = self._postgresql.config.format_dsn(r, True) logger.info('running pg_rewind from %s', dsn) try: return self._postgresql.cancellable.call([self._postgresql.pgcommand('pg_rewind'), '-D', self._postgresql.data_dir, '--source-server', dsn], env=env) == 0 except OSError: return False def execute(self, leader): if self._postgresql.is_running() and not self._postgresql.stop(checkpoint=False): return logger.warning('Can not run pg_rewind because postgres is still running') # prepare pg_rewind connection r = leader.conn_kwargs(self._postgresql.config.rewind_credentials) # 1. make sure that we are really trying to rewind from the master # 2. 
make sure that pg_control contains the new timeline by: # running a checkpoint or # waiting until Patroni on the master exposes checkpoint_after_promote=True checkpoint_status = leader.checkpoint_after_promote if isinstance(leader, Leader) else None if checkpoint_status is None: # master still runs the old Patroni leader_status = self._postgresql.checkpoint(leader.conn_kwargs(self._postgresql.config.superuser)) if leader_status: return logger.warning('Can not use %s for rewind: %s', leader.name, leader_status) elif not checkpoint_status: return logger.info('Waiting for checkpoint on %s before rewind', leader.name) elif not self.check_leader_is_not_in_recovery(**r): return if self.pg_rewind(r): self._state = REWIND_STATUS.SUCCESS elif not self.check_leader_is_not_in_recovery(**r): logger.warning('Failed to rewind because master %s became unreachable', leader.name) else: logger.error('Failed to rewind from healthy master: %s', leader.name) for name in ('remove_data_directory_on_rewind_failure', 'remove_data_directory_on_diverged_timelines'): if self._postgresql.config.get(name): logger.warning('%s is set. removing...', name) self._postgresql.remove_data_directory() self._state = REWIND_STATUS.INITIAL break else: self._state = REWIND_STATUS.FAILED return False def reset_state(self): self._state = REWIND_STATUS.INITIAL @property def is_needed(self): return self._state in (REWIND_STATUS.CHECK, REWIND_STATUS.NEED) @property def executed(self): return self._state > REWIND_STATUS.NOT_NEED @property def failed(self): return self._state == REWIND_STATUS.FAILED patroni-1.6.4/patroni/postgresql/slots.py000066400000000000000000000126401361356115100206000ustar00rootroot00000000000000import logging from patroni.postgresql.connection import get_connection_cursor from collections import defaultdict logger = logging.getLogger(__name__) def compare_slots(s1, s2): return s1['type'] == s2['type'] and (s1['type'] == 'physical' or s1['database'] == s2['database'] and s1['plugin'] == s2['plugin']) class SlotsHandler(object): def __init__(self, postgresql): self._postgresql = postgresql self._replication_slots = {} # already existing replication slots self.schedule() def _query(self, sql, *params): return self._postgresql.query(sql, *params, retry=False) def load_replication_slots(self): if self._postgresql.major_version >= 90400 and self._schedule_load_slots: replication_slots = {} cursor = self._query('SELECT slot_name, slot_type, plugin, database FROM pg_catalog.pg_replication_slots') for r in cursor: value = {'type': r[1]} if r[1] == 'logical': value.update({'plugin': r[2], 'database': r[3]}) replication_slots[r[0]] = value self._replication_slots = replication_slots self._schedule_load_slots = False def drop_replication_slot(self, name): cursor = self._query(('SELECT pg_catalog.pg_drop_replication_slot(%s) WHERE EXISTS (SELECT 1 ' + 'FROM pg_catalog.pg_replication_slots WHERE slot_name = %s AND NOT active)'), name, name) # In a normal situation rowcount should be 1, otherwise either the slot doesn't exist or it is still active return cursor.rowcount == 1 def sync_replication_slots(self, cluster): if self._postgresql.major_version >= 90400: try: self.load_replication_slots() slots = cluster.get_replication_slots(self._postgresql.name, self._postgresql.role) # drop old replication slots which are not present in the desired slots for name in set(self._replication_slots) - set(slots): if not self.drop_replication_slot(name): logger.error("Failed to drop replication slot '%s'", name) self._schedule_load_slots = True
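# Aside (illustration only, not part of the original module): how compare_slots() above treats
# slot definitions. Physical slots match on type alone, while logical slots must also agree on
# database and plugin:
# >>> compare_slots({'type': 'physical'}, {'type': 'physical'})
# True
# >>> compare_slots({'type': 'logical', 'database': 'db1', 'plugin': 'test_decoding'},
# ...               {'type': 'logical', 'database': 'db1', 'plugin': 'test_decoding'})
# True
# >>> compare_slots({'type': 'logical', 'database': 'db1', 'plugin': 'test_decoding'},
# ...               {'type': 'logical', 'database': 'db1', 'plugin': 'wal2json'})
# False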
immediately_reserve = ', true' if self._postgresql.major_version >= 90600 else '' logical_slots = defaultdict(dict) for name, value in slots.items(): if name in self._replication_slots and not compare_slots(value, self._replication_slots[name]): logger.info("Trying to drop replication slot '%s' because value is changing from %s to %s", name, self._replication_slots[name], value) if not self.drop_replication_slot(name): logger.error("Failed to drop replication slot '%s'", name) self._schedule_load_slots = True continue self._replication_slots.pop(name) if name not in self._replication_slots: if value['type'] == 'physical': try: self._query(("SELECT pg_catalog.pg_create_physical_replication_slot(%s{0})" + " WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_replication_slots" + " WHERE slot_type = 'physical' AND slot_name = %s)").format( immediately_reserve), name, name) except Exception: logger.exception("Failed to create physical replication slot '%s'", name) self._schedule_load_slots = True elif value['type'] == 'logical' and name not in self._replication_slots: logical_slots[value['database']][name] = value # create new logical slots for database, values in logical_slots.items(): conn_kwargs = self._postgresql.config.local_connect_kwargs conn_kwargs['database'] = database with get_connection_cursor(**conn_kwargs) as cur: for name, value in values.items(): try: cur.execute("SELECT pg_catalog.pg_create_logical_replication_slot(%s, %s)" + " WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_replication_slots" + " WHERE slot_type = 'logical' AND slot_name = %s)", (name, value['plugin'], name)) except Exception: logger.exception("Failed to create logical replication slot '%s' plugin='%s'", name, value['plugin']) self._schedule_load_slots = True self._replication_slots = slots except Exception: logger.exception('Exception when changing replication slots') self._schedule_load_slots = True def schedule(self, value=None): if value is None: value = self._postgresql.major_version >= 90400 self._schedule_load_slots = value patroni-1.6.4/patroni/request.py000066400000000000000000000041421361356115100167170ustar00rootroot00000000000000import json import urllib3 import six from six.moves.urllib_parse import urlparse, urlunparse from .utils import USER_AGENT class PatroniRequest(object): def __init__(self, config, insecure=False): cert_reqs = 'CERT_NONE' if insecure or config.get('ctl', {}).get('insecure', False) else 'CERT_REQUIRED' self._pool = urllib3.PoolManager(num_pools=10, maxsize=10, cert_reqs=cert_reqs) self.reload_config(config) @staticmethod def _get_cfg_value(config, name): return config.get('ctl', {}).get(name) or config.get('restapi', {}).get(name) def _apply_pool_param(self, param, value): if value: self._pool.connection_pool_kw[param] = value else: self._pool.connection_pool_kw.pop(param, None) def _apply_ssl_file_param(self, config, name): value = self._get_cfg_value(config, name + 'file') self._apply_pool_param(name + '_file', value) return value def reload_config(self, config): self._pool.headers = urllib3.make_headers(basic_auth=self._get_cfg_value(config, 'auth'), user_agent=USER_AGENT) if self._apply_ssl_file_param(config, 'cert'): self._apply_ssl_file_param(config, 'key') else: self._pool.connection_pool_kw.pop('key_file', None) cacert = config.get('ctl', {}).get('cacert') or config.get('restapi', {}).get('cafile') self._apply_pool_param('ca_certs', cacert) def request(self, method, url, body=None, **kwargs): if body is not None and not isinstance(body, six.string_types): body = 
json.dumps(body) return self._pool.request(method.upper(), url, body=body, **kwargs) def __call__(self, member, method='GET', endpoint=None, data=None, **kwargs): url = member.api_url if endpoint: scheme, netloc, _, _, _, _ = urlparse(url) url = urlunparse((scheme, netloc, endpoint, '', '', '')) return self.request(method, url, data, **kwargs) def get(url, verify=True, **kwargs): http = PatroniRequest({}, not verify) return http.request('GET', url, **kwargs) patroni-1.6.4/patroni/scripts/000077500000000000000000000000001361356115100163435ustar00rootroot00000000000000patroni-1.6.4/patroni/scripts/__init__.py000066400000000000000000000000001361356115100204420ustar00rootroot00000000000000patroni-1.6.4/patroni/scripts/aws.py000077500000000000000000000054501361356115100175160ustar00rootroot00000000000000#!/usr/bin/env python import json import logging import sys import boto.ec2 from patroni.utils import Retry, RetryFailedError from patroni.request import get as requests_get logger = logging.getLogger(__name__) class AWSConnection(object): def __init__(self, cluster_name): self.available = False self.cluster_name = cluster_name if cluster_name is not None else 'unknown' self._retry = Retry(deadline=300, max_delay=30, max_tries=-1, retry_exceptions=(boto.exception.StandardError,)) try: # get the instance id r = requests_get('http://169.254.169.254/latest/dynamic/instance-identity/document', timeout=2.1) except Exception: logger.error('cannot query AWS meta-data') return if r.status < 400: try: content = json.loads(r.data.decode('utf-8')) self.instance_id = content['instanceId'] self.region = content['region'] except Exception: logger.exception('unable to fetch instance id and region from AWS meta-data') return self.available = True def retry(self, *args, **kwargs): return self._retry.copy()(*args, **kwargs) def aws_available(self): return self.available def _tag_ebs(self, conn, role): """ set tags, carrying the cluster name, instance role and instance id for the EBS storage """ tags = {'Name': 'spilo_' + self.cluster_name, 'Role': role, 'Instance': self.instance_id} volumes = conn.get_all_volumes(filters={'attachment.instance-id': self.instance_id}) conn.create_tags([v.id for v in volumes], tags) def _tag_ec2(self, conn, role): """ tag the current EC2 instance with a cluster role """ tags = {'Role': role} conn.create_tags([self.instance_id], tags) def on_role_change(self, new_role): if not self.available: return False try: conn = self.retry(boto.ec2.connect_to_region, self.region) self.retry(self._tag_ec2, conn, new_role) self.retry(self._tag_ebs, conn, new_role) except RetryFailedError: logger.warning("Unable to communicate to AWS " "when setting tags for the EC2 instance {0} " "and attached EBS volumes".format(self.instance_id)) return False return True def main(): logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO) if len(sys.argv) == 4 and sys.argv[1] in ('on_start', 'on_stop', 'on_role_change'): AWSConnection(cluster_name=sys.argv[3]).on_role_change(sys.argv[2]) else: sys.exit("Usage: {0} action role name".format(sys.argv[0])) if __name__ == '__main__': main() patroni-1.6.4/patroni/scripts/wale_restore.py000077500000000000000000000340401361356115100214140ustar00rootroot00000000000000#!/usr/bin/env python # sample script to clone new replicas using WAL-E restore # falls back to pg_basebackup if WAL-E restore fails, or if # WAL-E backup is too far behind # note that pg_basebackup still expects to use restore from # WAL-E for transaction logs # theoretically 
should work with SWIFT, but not tested on it # arguments are: # - cluster scope # - cluster role # - master connection string # - number of retries # - envdir for the WALE env # - WALE_BACKUP_THRESHOLD_MEGABYTES if WAL amount is above that - use pg_basebackup # - WALE_BACKUP_THRESHOLD_PERCENTAGE if WAL size exceeds a certain percentage of the # this script depends on an envdir defining the S3 bucket (or SWIFT dir),and login # credentials per WALE Documentation. # currently also requires that you configure the restore_command to use wal_e, example: # recovery_conf: # restore_command: envdir /etc/wal-e.d/env wal-e wal-fetch "%f" "%p" -p 1 import argparse import csv import logging import os import psycopg2 import subprocess import sys import time from collections import namedtuple logger = logging.getLogger(__name__) RETRY_SLEEP_INTERVAL = 1 si_prefixes = ['K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'] # Meaningful names to the exit codes used by WALERestore ExitCode = type('Enum', (), { 'SUCCESS': 0, #: Succeeded 'RETRY_LATER': 1, #: External issue, retry later 'FAIL': 2 #: Don't try again unless configuration changes }) # We need to know the current PG version in order to figure out the correct WAL directory name def get_major_version(data_dir): version_file = os.path.join(data_dir, 'PG_VERSION') if os.path.isfile(version_file): # version file exists try: with open(version_file) as f: return float(f.read()) except Exception: logger.exception('Failed to read PG_VERSION from %s', data_dir) return 0.0 def repr_size(n_bytes): """ >>> repr_size(1000) '1000 Bytes' >>> repr_size(8257332324597) '7.5 TiB' """ if n_bytes < 1024: return '{0} Bytes'.format(n_bytes) i = -1 while n_bytes > 1023: n_bytes /= 1024.0 i += 1 return '{0} {1}iB'.format(round(n_bytes, 1), si_prefixes[i]) def size_as_bytes(size_, prefix): """ >>> size_as_bytes(7.5, 'T') 8246337208320 """ prefix = prefix.upper() assert prefix in si_prefixes exponent = si_prefixes.index(prefix) + 1 return int(size_ * (1024.0 ** exponent)) WALEConfig = namedtuple( 'WALEConfig', [ 'env_dir', 'threshold_mb', 'threshold_pct', 'cmd', ] ) class WALERestore(object): def __init__(self, scope, datadir, connstring, env_dir, threshold_mb, threshold_pct, use_iam, no_master, retries): self.scope = scope self.master_connection = connstring self.data_dir = datadir self.no_master = no_master wale_cmd = [ 'envdir', env_dir, 'wal-e', ] if use_iam == 1: wale_cmd += ['--aws-instance-profile'] self.wal_e = WALEConfig( env_dir=env_dir, threshold_mb=threshold_mb, threshold_pct=threshold_pct, cmd=wale_cmd, ) self.init_error = (not os.path.exists(self.wal_e.env_dir)) self.retries = retries def run(self): """ Creates a new replica using WAL-E Returns ------- ExitCode 0 = Success 1 = Error, try again 2 = Error, don't try again """ if self.init_error: logger.error('init error: %r did not exist at initialization time', self.wal_e.env_dir) return ExitCode.FAIL try: should_use_s3 = self.should_use_s3_to_create_replica() if should_use_s3 is None: # Need to retry return ExitCode.RETRY_LATER elif should_use_s3: return self.create_replica_with_s3() elif not should_use_s3: return ExitCode.FAIL except Exception: logger.exception("Unhandled exception when running WAL-E restore") return ExitCode.FAIL def should_use_s3_to_create_replica(self): """ determine whether it makes sense to use S3 and not pg_basebackup """ threshold_megabytes = self.wal_e.threshold_mb threshold_percent = self.wal_e.threshold_pct try: cmd = self.wal_e.cmd + ['backup-list', '--detail', 'LATEST'] logger.debug('calling 
%r', cmd) wale_output = subprocess.check_output(cmd) reader = csv.DictReader(wale_output.decode('utf-8').splitlines(), dialect='excel-tab') rows = list(reader) if not len(rows): logger.warning('wal-e did not find any backups') return False # This check might not add much; it was performed in the previous # version of this code. Since the old version hand-rolled CSV parsing, the # check may have been part of the CSV parsing. if len(rows) > 1: logger.warning( 'wal-e returned more than one row of backups: %r', rows) return False backup_info = rows[0] except subprocess.CalledProcessError: logger.exception("could not query wal-e latest backup") return None try: backup_size = int(backup_info['expanded_size_bytes']) backup_start_segment = backup_info['wal_segment_backup_start'] backup_start_offset = backup_info['wal_segment_offset_backup_start'] except KeyError: logger.exception("unable to get some of the WAL-E backup parameters") return None # WAL filename is XXXXXXXXYYYYYYYY000000ZZ, where X - timeline, Y - LSN logical log file, # ZZ - 2 high digits of LSN offset. The rest of the offset is the provided decimal offset, # that we have to convert to hex and 'prepend' to the high offset digits. lsn_segment = backup_start_segment[8:16] # format as hex directly, so the result carries neither the '0x' prefix nor a Python 2 'L' suffix lsn_offset = '{0:x}'.format((int(backup_start_segment[16:32], 16) << 24) + int(backup_start_offset)) # construct the LSN from the segment and offset backup_start_lsn = '{0}/{1}'.format(lsn_segment, lsn_offset) diff_in_bytes = backup_size attempts_no = 0 while True: if self.master_connection: try: # get the difference in bytes between the current WAL location and the backup start offset with psycopg2.connect(self.master_connection) as con: if con.server_version >= 100000: wal_name = 'wal' lsn_name = 'lsn' else: wal_name = 'xlog' lsn_name = 'location' con.autocommit = True with con.cursor() as cur: cur.execute(("SELECT CASE WHEN pg_catalog.pg_is_in_recovery()" " THEN GREATEST(pg_catalog.pg_{0}_{1}_diff(COALESCE(" "pg_last_{0}_receive_{1}(), '0/0'), %s)::bigint, " "pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_last_{0}_replay_{1}(), %s)::bigint)" " ELSE pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_current_{0}_{1}(), %s)::bigint" " END").format(wal_name, lsn_name), (backup_start_lsn, backup_start_lsn, backup_start_lsn)) diff_in_bytes = int(cur.fetchone()[0]) except psycopg2.Error: logger.exception('could not determine difference with the master location') if attempts_no < self.retries: # retry in case of a temporary connection issue attempts_no = attempts_no + 1 time.sleep(RETRY_SLEEP_INTERVAL) continue else: if not self.no_master: return False # do no more retries on the outer level logger.info("continue with base backup from S3 since master is not available") diff_in_bytes = 0 break else: # always try to use WAL-E if master connection string is not available diff_in_bytes = 0 break # if the size of the accumulated WAL segments is more than a certain percentage of the backup size # or exceeds the pre-determined size - pg_basebackup is chosen instead.
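# Worked example of the threshold check computed below (illustrative numbers): with
# threshold_megabytes=10240 (10 GiB) and threshold_percent=30, a 20 GiB backup gives
# threshold_pct_bytes = 20 GiB * 30 / 100 = 6 GiB. If diff_in_bytes is 8 GiB, the absolute
# check passes (8 GiB < 10 GiB) but the percentage check fails (8 GiB >= 6 GiB), so
# are_thresholds_ok is False and the caller falls back to pg_basebackup.
# Similarly for the LSN reconstruction above: segment '00000002000000A10000004F' with
# decimal offset '1234' yields lsn_segment = '000000A1' and
# lsn_offset = '{0:x}'.format((0x4F << 24) + 1234) = '4f0004d2', i.e. LSN '000000A1/4f0004d2'.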
is_size_thresh_ok = diff_in_bytes < int(threshold_megabytes) * 1048576 threshold_pct_bytes = backup_size * threshold_percent / 100.0 is_percentage_thresh_ok = float(diff_in_bytes) < int(threshold_pct_bytes) are_thresholds_ok = is_size_thresh_ok and is_percentage_thresh_ok class Size(object): def __init__(self, n_bytes, prefix=None): self.n_bytes = n_bytes self.prefix = prefix def __repr__(self): if self.prefix is not None: n_bytes = size_as_bytes(self.n_bytes, self.prefix) else: n_bytes = self.n_bytes return repr_size(n_bytes) class HumanContext(object): def __init__(self, items): self.items = items def __repr__(self): return ', '.join('{}={!r}'.format(key, value) for key, value in self.items) human_context = repr(HumanContext([ ('threshold_size', Size(threshold_megabytes, 'M')), ('threshold_percent', threshold_percent), ('threshold_percent_size', Size(threshold_pct_bytes)), ('backup_size', Size(backup_size)), ('backup_diff', Size(diff_in_bytes)), ('is_size_thresh_ok', is_size_thresh_ok), ('is_percentage_thresh_ok', is_percentage_thresh_ok), ])) if not are_thresholds_ok: logger.info('wal-e backup size diff is over threshold, falling back ' 'to other means of restore: %s', human_context) else: logger.info('Thresholds are OK, using wal-e basebackup: %s', human_context) return are_thresholds_ok def fix_subdirectory_path_if_broken(self, dirname): # in case it is a symlink pointing to a non-existing location, remove it and create the actual directory path = os.path.join(self.data_dir, dirname) if not os.path.exists(path): if os.path.islink(path): # broken xlog symlink, remove it try: os.remove(path) except OSError: logger.exception("could not remove broken %s symlink pointing to %s", dirname, os.readlink(path)) return False try: os.mkdir(path) except OSError: logger.exception("could not create missing %s directory path", dirname) return False return True def create_replica_with_s3(self): # if we're set up, restore the replica by fetching the latest backup try: cmd = self.wal_e.cmd + ['backup-fetch', '{}'.format(self.data_dir), 'LATEST'] logger.debug('calling: %r', cmd) exit_code = subprocess.call(cmd) except Exception as e: logger.error('Error when fetching backup with WAL-E: {0}'.format(e)) return ExitCode.RETRY_LATER if (exit_code == 0 and not self.fix_subdirectory_path_if_broken('pg_xlog' if get_major_version(self.data_dir) < 10 else 'pg_wal')): return ExitCode.FAIL return exit_code def main(): logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO) parser = argparse.ArgumentParser(description='Script to image replicas using WAL-E') parser.add_argument('--scope', required=True) parser.add_argument('--role', required=False) parser.add_argument('--datadir', required=True) parser.add_argument('--connstring', required=True) parser.add_argument('--retries', type=int, default=1) parser.add_argument('--envdir', required=True) parser.add_argument('--threshold_megabytes', type=int, default=10240) parser.add_argument('--threshold_backup_size_percentage', type=int, default=30) parser.add_argument('--use_iam', type=int, default=0) parser.add_argument('--no_master', type=int, default=0) args = parser.parse_args() exit_code = None assert args.retries >= 0 # Retry cloning in a loop. We do separate retries for the master # connection attempt inside should_use_s3_to_create_replica, # because we need to differentiate between the last attempt and # the rest and make a decision when the last attempt fails on # whether to use WAL-E or not depending on the no_master flag.
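# Example invocation (hypothetical host, paths and credentials), using the arguments
# defined by the parser above:
# python wale_restore.py --scope batman --role replica --datadir /home/postgres/pgdata \
#     --connstring "host=10.0.1.1 port=5432 user=replicator" --envdir /etc/wal-e.d/env \
#     --retries 2 --threshold_megabytes 10240 --threshold_backup_size_percentage 30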
for _ in range(0, args.retries + 1): restore = WALERestore(scope=args.scope, datadir=args.datadir, connstring=args.connstring, env_dir=args.envdir, threshold_mb=args.threshold_megabytes, threshold_pct=args.threshold_backup_size_percentage, use_iam=args.use_iam, no_master=args.no_master, retries=args.retries) exit_code = restore.run() if not exit_code == ExitCode.RETRY_LATER: # only WAL-E failures lead to the retry logger.debug('exit_code is %r, not retrying', exit_code) break time.sleep(RETRY_SLEEP_INTERVAL) return exit_code if __name__ == '__main__': sys.exit(main()) patroni-1.6.4/patroni/utils.py000066400000000000000000000346241361356115100163770ustar00rootroot00000000000000import logging import os import platform import random import re import tempfile import time from dateutil import tz from .exceptions import PatroniException from .version import __version__ tzutc = tz.tzutc() logger = logging.getLogger(__name__) USER_AGENT = 'Patroni/{0} Python/{1} {2}'.format(__version__, platform.python_version(), platform.system()) OCT_RE = re.compile(r'^[-+]?0[0-7]*') DEC_RE = re.compile(r'^[-+]?(0|[1-9][0-9]*)') HEX_RE = re.compile(r'^[-+]?0x[0-9a-fA-F]+') DBL_RE = re.compile(r'^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?') def deep_compare(obj1, obj2): """ >>> deep_compare({'1': None}, {}) False >>> deep_compare({'1': {}}, {'1': None}) False >>> deep_compare({'1': [1]}, {'1': [2]}) False >>> deep_compare({'1': 2}, {'1': '2'}) True >>> deep_compare({'1': {'2': [3, 4]}}, {'1': {'2': [3, 4]}}) True """ if set(list(obj1.keys())) != set(list(obj2.keys())): # Objects have different sets of keys return False for key, value in obj1.items(): if isinstance(value, dict): if not (isinstance(obj2[key], dict) and deep_compare(value, obj2[key])): return False elif str(value) != str(obj2[key]): return False return True def patch_config(config, data): """recursively 'patch' `config` with `data` :returns: `!True` if the `config` was changed""" is_changed = False for name, value in data.items(): if value is None: if config.pop(name, None) is not None: is_changed = True elif name in config: if isinstance(value, dict): if isinstance(config[name], dict): if patch_config(config[name], value): is_changed = True else: config[name] = value is_changed = True elif str(config[name]) != str(value): config[name] = value is_changed = True else: config[name] = value is_changed = True return is_changed def parse_bool(value): """ >>> parse_bool(1) True >>> parse_bool('off') False >>> parse_bool('foo') """ value = str(value).lower() if value in ('on', 'true', 'yes', '1'): return True if value in ('off', 'false', 'no', '0'): return False def strtol(value, strict=True): """As most as possible close equivalent of strtol(3) function (with base=0), used by postgres to parse parameter values. >>> strtol(0) == (0, '') True >>> strtol(1) == (1, '') True >>> strtol(9) == (9, '') True >>> strtol(' +0x400MB') == (1024, 'MB') True >>> strtol(' -070d') == (-56, 'd') True >>> strtol(' d ') == (None, 'd') True >>> strtol(' 1 d ') == (1, ' d') True >>> strtol('9s', False) == (9, 's') True >>> strtol(' s ', False) == (1, 's') True """ value = str(value).strip() for regex, base in ((HEX_RE, 16), (OCT_RE, 8), (DEC_RE, 10)): match = regex.match(value) if match: end = match.end() return int(value[:end], base), value[end:] return (None if strict else 1), value def strtod(value): """As most as possible close equivalent of strtod(3) function used by postgres to parse parameter values. 
>>> strtod(' A ') == (None, 'A') True """ value = str(value).strip() match = DBL_RE.match(value) if match: end = match.end() return float(value[:end]), value[end:] return None, value def rint(value): """ >>> rint(0.5) == 0 True >>> rint(0.501) == 1 True >>> rint(1.5) == 2 True """ ret = round(value) return 2.0 * round(value / 2.0) if abs(ret - value) == 0.5 else ret def convert_to_base_unit(value, unit, base_unit): convert = { 'B': {'B': 1, 'kB': 1024, 'MB': 1024 * 1024, 'GB': 1024 * 1024 * 1024, 'TB': 1024 * 1024 * 1024 * 1024}, 'kB': {'B': 1.0 / 1024, 'kB': 1, 'MB': 1024, 'GB': 1024 * 1024, 'TB': 1024 * 1024 * 1024}, 'MB': {'B': 1.0 / (1024 * 1024), 'kB': 1.0 / 1024, 'MB': 1, 'GB': 1024, 'TB': 1024 * 1024}, 'ms': {'us': 1.0 / 1000, 'ms': 1, 's': 1000, 'min': 1000 * 60, 'h': 1000 * 60 * 60, 'd': 1000 * 60 * 60 * 24}, 's': {'us': 1.0 / (1000 * 1000), 'ms': 1.0 / 1000, 's': 1, 'min': 60, 'h': 60 * 60, 'd': 60 * 60 * 24}, 'min': {'us': 1.0 / (1000 * 1000 * 60), 'ms': 1.0 / (1000 * 60), 's': 1.0 / 60, 'min': 1, 'h': 60, 'd': 60 * 24} } round_order = { 'TB': 'GB', 'GB': 'MB', 'MB': 'kB', 'kB': 'B', 'd': 'h', 'h': 'min', 'min': 's', 's': 'ms', 'ms': 'us' } if base_unit and base_unit not in convert: base_value, base_unit = strtol(base_unit, False) else: base_value = 1 if base_unit in convert and unit in convert[base_unit]: value *= convert[base_unit][unit] / float(base_value) if unit in round_order: multiplier = convert[base_unit][round_order[unit]] value = rint(value / float(multiplier)) * multiplier return value def parse_int(value, base_unit=None): """ >>> parse_int('1') == 1 True >>> parse_int(' 0x400 MB ', '16384kB') == 64 True >>> parse_int('1MB', 'kB') == 1024 True >>> parse_int('1000 ms', 's') == 1 True >>> parse_int('1TB', 'GB') is None True >>> parse_int(0) == 0 True >>> parse_int('6GB', '16MB') == 384 True >>> parse_int('4097.4kB', 'kB') == 4097 True >>> parse_int('4097.5kB', 'kB') == 4098 True """ val, unit = strtol(value) if val is None and unit.startswith('.') or unit and unit[0] in ('.', 'e', 'E'): val, unit = strtod(value) if val is not None: unit = unit.strip() if not unit: return int(rint(val)) val = convert_to_base_unit(val, unit, base_unit) if val is not None: return int(rint(val)) def parse_real(value, base_unit=None): """ >>> parse_real(' +0.0005 ') == 0.0005 True >>> parse_real('0.0005ms', 'ms') == 0.0 True >>> parse_real('0.00051ms', 'ms') == 0.001 True """ val, unit = strtod(value) if val is not None: unit = unit.strip() if not unit: return val return convert_to_base_unit(val, unit, base_unit) def compare_values(vartype, unit, old_value, new_value): """ >>> compare_values('enum', None, 'remote_write', 'REMOTE_WRITE') True >>> compare_values('real', None, '1e-06', 0.000001) True """ converters = { 'bool': lambda v1, v2: parse_bool(v1), 'integer': parse_int, 'real': parse_real, 'enum': lambda v1, v2: str(v1).lower(), 'string': lambda v1, v2: str(v1) } convert = converters.get(vartype) or converters['string'] old_value = convert(old_value, None) new_value = convert(new_value, unit) return old_value is not None and new_value is not None and old_value == new_value def _sleep(interval): time.sleep(interval) class RetryFailedError(PatroniException): """Raised when retrying an operation ultimately failed, after retrying the maximum number of attempts.""" class Retry(object): """Helper for retrying a method in the face of retry-able exceptions""" def __init__(self, max_tries=1, delay=0.1, backoff=2, max_jitter=0.8, max_delay=3600, sleep_func=_sleep, deadline=None, 
retry_exceptions=PatroniException): """Create a :class:`Retry` instance for retrying function calls :param max_tries: How many times to retry the command. -1 means infinite tries. :param delay: Initial delay between retry attempts. :param backoff: Backoff multiplier between retry attempts. Defaults to 2 for exponential backoff. :param max_jitter: Additional max jitter period to wait between retry attempts to avoid slamming the server. :param max_delay: Maximum delay in seconds, regardless of other backoff settings. Defaults to one hour. :param retry_exceptions: single exception or tuple""" self.max_tries = max_tries self.delay = delay self.backoff = backoff self.max_jitter = int(max_jitter * 100) self.max_delay = float(max_delay) self._attempts = 0 self._cur_delay = delay self.deadline = deadline self._cur_stoptime = None self.sleep_func = sleep_func self.retry_exceptions = retry_exceptions def reset(self): """Reset the attempt counter""" self._attempts = 0 self._cur_delay = self.delay self._cur_stoptime = None def copy(self): """Return a clone of this retry manager""" return Retry(max_tries=self.max_tries, delay=self.delay, backoff=self.backoff, max_jitter=self.max_jitter / 100.0, max_delay=self.max_delay, sleep_func=self.sleep_func, deadline=self.deadline, retry_exceptions=self.retry_exceptions) @property def sleeptime(self): return self._cur_delay + (random.randint(0, self.max_jitter) / 100.0) def update_delay(self): self._cur_delay = min(self._cur_delay * self.backoff, self.max_delay) @property def stoptime(self): return self._cur_stoptime def __call__(self, func, *args, **kwargs): """Call a function with arguments until it completes without throwing a `retry_exceptions` :param func: Function to call :param args: Positional arguments to call the function with :params kwargs: Keyword arguments to call the function with The function will be called until it doesn't throw one of the retryable exceptions""" self.reset() while True: try: if self.deadline is not None and self._cur_stoptime is None: self._cur_stoptime = time.time() + self.deadline return func(*args, **kwargs) except self.retry_exceptions as e: # Note: max_tries == -1 means infinite tries. if self._attempts == self.max_tries: logger.warning('Retry got exception: %s', e) raise RetryFailedError("Too many retry attempts") self._attempts += 1 sleeptime = self.sleeptime if self._cur_stoptime is not None and time.time() + sleeptime >= self._cur_stoptime: logger.warning('Retry got exception: %s', e) raise RetryFailedError("Exceeded retry deadline") logger.debug('Retry got exception: %s', e) self.sleep_func(sleeptime) self.update_delay() def polling_loop(timeout, interval=1): """Returns an iterator that returns values until timeout has passed. 
Timeout is measured from start of iteration.""" start_time = time.time() iteration = 0 end_time = start_time + timeout while time.time() < end_time: yield iteration iteration += 1 time.sleep(interval) def split_host_port(value, default_port): t = value.rsplit(':', 1) t.append(default_port) return t[0], int(t[1]) def uri(proto, netloc, path='', user=None): host, port = netloc if isinstance(netloc, (list, tuple)) else split_host_port(netloc, 0) if host and ':' in host and host[0] != '[' and host[-1] != ']': host = '[{0}]'.format(host) port = ':{0}'.format(port) if port else '' path = '/{0}'.format(path) if path and not path.startswith('/') else path user = '{0}@'.format(user) if user else '' return '{0}://{1}{2}{3}{4}'.format(proto, user, host, port, path) def is_standby_cluster(config): # Check whether or not provided configuration describes a standby cluster return isinstance(config, dict) and (config.get('host') or config.get('port') or config.get('restore_command')) def cluster_as_json(cluster): leader_name = cluster.leader.name if cluster.leader else None xlog_location_cluster = cluster.last_leader_operation or 0 ret = {'members': []} for m in cluster.members: if m.name == leader_name: config = cluster.config.data if cluster.config and cluster.config.modify_index else {} role = 'standby_leader' if is_standby_cluster(config.get('standby_cluster')) else 'leader' elif m.name == cluster.sync.sync_standby: role = 'sync_standby' else: role = 'replica' conn_kwargs = m.conn_kwargs() member = {'name': m.name, 'host': conn_kwargs['host'], 'port': int(conn_kwargs['port']), 'role': role, 'state': m.data.get('state', ''), 'api_url': m.api_url} optional_attributes = ('timeline', 'pending_restart', 'scheduled_restart', 'tags') member.update({n: m.data[n] for n in optional_attributes if n in m.data}) if m.name != leader_name: xlog_location = m.data.get('xlog_location') if xlog_location is None: member['lag'] = 'unknown' elif xlog_location_cluster >= xlog_location: member['lag'] = xlog_location_cluster - xlog_location else: member['lag'] = 0 ret['members'].append(member) # sort members by name for consistency ret['members'].sort(key=lambda m: m['name']) if cluster.is_paused(): ret['pause'] = True if cluster.failover and cluster.failover.scheduled_at: ret['scheduled_switchover'] = {'at': cluster.failover.scheduled_at.isoformat()} if cluster.failover.leader: ret['scheduled_switchover']['from'] = cluster.failover.leader if cluster.failover.candidate: ret['scheduled_switchover']['to'] = cluster.failover.candidate return ret def is_subpath(d1, d2): real_d1 = os.path.realpath(d1) + os.path.sep real_d2 = os.path.realpath(os.path.join(real_d1, d2)) return os.path.commonprefix([real_d1, real_d2 + os.path.sep]) == real_d1 def validate_directory(d, msg="{} {}"): if not os.path.exists(d): try: os.makedirs(d) except OSError as e: logger.error(e) raise PatroniException(msg.format(d, "couldn't create the directory")) elif os.path.isdir(d): try: fd, tmpfile = tempfile.mkstemp(dir=d) os.close(fd) os.remove(tmpfile) except OSError: raise PatroniException(msg.format(d, "the directory is not writable")) else: raise PatroniException(msg.format(d, "is not a directory")) patroni-1.6.4/patroni/version.py000066400000000000000000000000261361356115100167110ustar00rootroot00000000000000__version__ = '1.6.4' 
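# Aside: doctest-style illustration (not part of the original modules) of helpers from
# patroni/utils.py above. split_host_port() splits on the last colon and falls back to the
# default port, and uri() brackets bare IPv6 hosts:
# >>> split_host_port('localhost:5432', 5433)
# ('localhost', 5432)
# >>> split_host_port('localhost', 5433)
# ('localhost', 5433)
# >>> uri('postgres', ('::1', 5432), 'postgres', 'admin')
# 'postgres://admin@[::1]:5432/postgres'
# A Retry instance (defined above) is typically used by calling it with the target function;
# `some_callable` here is a hypothetical stand-in:
# retry = Retry(deadline=10, max_delay=1, max_tries=-1, retry_exceptions=PatroniException)
# result = retry(some_callable, arg1, arg2)  # retries until success or until the deadline passes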
patroni-1.6.4/patroni/watchdog/000077500000000000000000000000001361356115100164545ustar00rootroot00000000000000patroni-1.6.4/patroni/watchdog/__init__.py000066400000000000000000000001421361356115100205620ustar00rootroot00000000000000from patroni.watchdog.base import WatchdogError, Watchdog __all__ = ['WatchdogError', 'Watchdog'] patroni-1.6.4/patroni/watchdog/base.py000066400000000000000000000266411361356115100177510ustar00rootroot00000000000000import abc import logging import platform import six import sys from threading import RLock from patroni.exceptions import WatchdogError __all__ = ['WatchdogError', 'Watchdog'] logger = logging.getLogger(__name__) MODE_REQUIRED = 'required' # Will not run if a watchdog is not available MODE_AUTOMATIC = 'automatic' # Will use a watchdog if one is available MODE_OFF = 'off' # Will not try to use a watchdog def parse_mode(mode): if mode is False: return MODE_OFF mode = mode.lower() if mode in ['require', 'required']: return MODE_REQUIRED elif mode in ['auto', 'automatic']: return MODE_AUTOMATIC else: if mode not in ['off', 'disable', 'disabled']: logger.warning("Watchdog mode {0} not recognized, disabling watchdog".format(mode)) return MODE_OFF def synchronized(func): def wrapped(self, *args, **kwargs): with self._lock: return func(self, *args, **kwargs) return wrapped class WatchdogConfig(object): """Helper to contain a snapshot of configuration""" def __init__(self, config): self.mode = parse_mode(config['watchdog'].get('mode', 'automatic')) self.ttl = config['ttl'] self.loop_wait = config['loop_wait'] self.safety_margin = config['watchdog'].get('safety_margin', 5) self.driver = config['watchdog'].get('driver', 'default') self.driver_config = dict((k, v) for k, v in config['watchdog'].items() if k not in ['mode', 'safety_margin', 'driver']) def __eq__(self, other): return isinstance(other, WatchdogConfig) and \ all(getattr(self, attr) == getattr(other, attr) for attr in ['mode', 'ttl', 'loop_wait', 'safety_margin', 'driver', 'driver_config']) def __ne__(self, other): return not self == other def get_impl(self): if self.driver == 'testing': # pragma: no cover from patroni.watchdog.linux import TestingWatchdogDevice return TestingWatchdogDevice.from_config(self.driver_config) elif platform.system() == 'Linux' and self.driver == 'default': from patroni.watchdog.linux import LinuxWatchdogDevice return LinuxWatchdogDevice.from_config(self.driver_config) else: return NullWatchdog() @property def timeout(self): if self.safety_margin == -1: return int(self.ttl // 2) else: return self.ttl - self.safety_margin @property def timing_slack(self): return self.timeout - self.loop_wait class Watchdog(object): """Facade to dynamically manage watchdog implementations and handle config changes. When activation fails underlying implementation will be switched to a Null implementation. 
To avoid log spam activation will only be retried when watchdog configuration is changed.""" def __init__(self, config): self.active_config = self.config = WatchdogConfig(config) self._lock = RLock() self.active = False if self.config.mode == MODE_OFF: self.impl = NullWatchdog() else: self.impl = self.config.get_impl() if self.config.mode == MODE_REQUIRED and self.impl.is_null: logger.error("Configuration requires a watchdog, but watchdog is not supported on this platform.") sys.exit(1) @synchronized def reload_config(self, config): self.config = WatchdogConfig(config) # Turning a watchdog off can always be done immediately if self.config.mode == MODE_OFF: if self.active: self._disable() self.active_config = self.config self.impl = NullWatchdog() # If watchdog is not active we can apply config immediately to show any warnings early. Otherwise we need to # delay until next time a keepalive is sent so timeout matches up with leader key update. if not self.active: if self.config.driver != self.active_config.driver or \ self.config.driver_config != self.active_config.driver_config: self.impl = self.config.get_impl() self.active_config = self.config @synchronized def activate(self): """Activates the watchdog device with suitable timeouts. While watchdog is active keepalive needs to be called every time loop_wait expires. :returns False if a safe watchdog could not be configured, but is required. """ self.active = True return self._activate() def _activate(self): self.active_config = self.config if self.config.timing_slack < 0: logger.warning('Watchdog not supported because leader TTL {0} is less than 2x loop_wait {1}' .format(self.config.ttl, self.config.loop_wait)) self.impl = NullWatchdog() try: self.impl.open() actual_timeout = self._set_timeout() except WatchdogError as e: logger.warning("Could not activate %s: %s", self.impl.describe(), e) self.impl = NullWatchdog() if self.impl.is_running and not self.impl.can_be_disabled: logger.warning("Watchdog implementation can't be disabled." " Watchdog will trigger after Patroni loses leader key.") if not self.impl.is_running or actual_timeout > self.config.timeout: if self.config.mode == MODE_REQUIRED: if self.impl.is_null: logger.error("Configuration requires watchdog, but watchdog could not be configured.") else: logger.error("Configuration requires watchdog, but a safe watchdog timeout {0} could" " not be configured. 
Watchdog timeout is {1}.".format( self.config.timeout, actual_timeout)) return False else: if not self.impl.is_null: logger.warning("Watchdog timeout {0} seconds does not ensure safe termination within {1} seconds" .format(actual_timeout, self.config.timeout)) if self.is_running: logger.info("{0} activated with {1} second timeout, timing slack {2} seconds" .format(self.impl.describe(), actual_timeout, self.config.timing_slack)) else: if self.config.mode == MODE_REQUIRED: logger.error("Configuration requires watchdog, but watchdog could not be activated") return False return True def _set_timeout(self): if self.impl.has_set_timeout(): self.impl.set_timeout(self.config.timeout) # Safety checks for watchdog implementations that don't support configurable timeouts actual_timeout = self.impl.get_timeout() if self.impl.is_running and actual_timeout < self.config.loop_wait: logger.error('loop_wait of {0} seconds is too long for watchdog {1} second timeout' .format(self.config.loop_wait, actual_timeout)) if self.impl.can_be_disabled: logger.info('Disabling watchdog due to unsafe timeout.') self.impl.close() self.impl = NullWatchdog() return None return actual_timeout @synchronized def disable(self): self._disable() self.active = False def _disable(self): try: if self.impl.is_running and not self.impl.can_be_disabled: # Give sysadmin some extra time to clean stuff up. self.impl.keepalive() logger.warning("Watchdog implementation can't be disabled. System will reboot after " "{0} seconds when watchdog times out.".format(self.impl.get_timeout())) self.impl.close() except WatchdogError as e: logger.error("Error while disabling watchdog: %s", e) @synchronized def keepalive(self): try: if self.active: self.impl.keepalive() # In case there are any pending configuration changes apply them now. if self.active and self.config != self.active_config: if self.config.mode != MODE_OFF and self.active_config.mode == MODE_OFF: self.impl = self.config.get_impl() self._activate() if self.config.driver != self.active_config.driver \ or self.config.driver_config != self.active_config.driver_config: self._disable() self.impl = self.config.get_impl() self._activate() if self.config.timeout != self.active_config.timeout: self.impl.set_timeout(self.config.timeout) except WatchdogError as e: logger.error("Error while sending keepalive: %s", e) @property @synchronized def is_running(self): return self.impl.is_running @property @synchronized def is_healthy(self): if self.config.mode != MODE_REQUIRED: return True return self.config.timing_slack >= 0 and self.impl.is_healthy @six.add_metaclass(abc.ABCMeta) class WatchdogBase(object): """A watchdog object when opened requires periodic calls to keepalive. When keepalive is not called within a timeout the system will be terminated.""" is_null = False @property def is_running(self): """Returns True when watchdog is activated and capable of performing it's task.""" return False @property def is_healthy(self): """Returns False when calling open() is known to fail.""" return False @property def can_be_disabled(self): """Returns True when watchdog will be disabled by calling close(). Some watchdog devices will keep running no matter what once activated. May raise WatchdogError if called without calling open() first.""" return True @abc.abstractmethod def open(self): """Open watchdog device. When watchdog is opened keepalive must be called. 
Returns nothing on success or raises WatchdogError if the device could not be opened.""" @abc.abstractmethod def close(self): """Gracefully close watchdog device.""" @abc.abstractmethod def keepalive(self): """Resets the watchdog timer. Watchdog must be open when keepalive is called.""" @abc.abstractmethod def get_timeout(self): """Returns the current keepalive timeout in effect.""" @staticmethod def has_set_timeout(): """Returns True if setting a timeout is supported.""" return False def set_timeout(self, timeout): """Set the watchdog timer timeout. :param timeout: watchdog timeout in seconds""" raise WatchdogError("Setting timeout is not supported on {0}".format(self.describe())) def describe(self): """Human readable name for this device""" return self.__class__.__name__ @classmethod def from_config(cls, config): return cls() class NullWatchdog(WatchdogBase): """Null implementation when watchdog is not supported.""" is_null = True def open(self): return def close(self): return def keepalive(self): return def get_timeout(self): # A big enough number to not matter return 1000000000 patroni-1.6.4/patroni/watchdog/linux.py000066400000000000000000000174161361356115100201760ustar00rootroot00000000000000import collections import ctypes import os import platform from patroni.watchdog.base import WatchdogBase, WatchdogError # Pythonification of linux/ioctl.h IOC_NONE = 0 IOC_WRITE = 1 IOC_READ = 2 IOC_NRBITS = 8 IOC_TYPEBITS = 8 IOC_SIZEBITS = 14 IOC_DIRBITS = 2 # Non-generic platform special cases machine = platform.machine() if machine in ['mips', 'sparc', 'powerpc', 'ppc64']: # pragma: no cover IOC_SIZEBITS = 13 IOC_DIRBITS = 3 IOC_NONE, IOC_WRITE, IOC_READ = 1, 2, 4 elif machine == 'parisc': # pragma: no cover IOC_WRITE, IOC_READ = 2, 1 IOC_NRSHIFT = 0 IOC_TYPESHIFT = IOC_NRSHIFT + IOC_NRBITS IOC_SIZESHIFT = IOC_TYPESHIFT + IOC_TYPEBITS IOC_DIRSHIFT = IOC_SIZESHIFT + IOC_SIZEBITS def IOW(type_, nr, size): return IOC(IOC_WRITE, type_, nr, size) def IOR(type_, nr, size): return IOC(IOC_READ, type_, nr, size) def IOWR(type_, nr, size): return IOC(IOC_READ | IOC_WRITE, type_, nr, size) def IOC(dir_, type_, nr, size): return (dir_ << IOC_DIRSHIFT) \ | (ord(type_) << IOC_TYPESHIFT) \ | (nr << IOC_NRSHIFT) \ | (size << IOC_SIZESHIFT) # Pythonification of linux/watchdog.h WATCHDOG_IOCTL_BASE = 'W' class watchdog_info(ctypes.Structure): _fields_ = [ ('options', ctypes.c_uint32), # Options the card/driver supports ('firmware_version', ctypes.c_uint32), # Firmware version of the card ('identity', ctypes.c_uint8 * 32), # Identity of the board ] struct_watchdog_info_size = ctypes.sizeof(watchdog_info) int_size = ctypes.sizeof(ctypes.c_int) WDIOC_GETSUPPORT = IOR(WATCHDOG_IOCTL_BASE, 0, struct_watchdog_info_size) WDIOC_GETSTATUS = IOR(WATCHDOG_IOCTL_BASE, 1, int_size) WDIOC_GETBOOTSTATUS = IOR(WATCHDOG_IOCTL_BASE, 2, int_size) WDIOC_GETTEMP = IOR(WATCHDOG_IOCTL_BASE, 3, int_size) WDIOC_SETOPTIONS = IOR(WATCHDOG_IOCTL_BASE, 4, int_size) WDIOC_KEEPALIVE = IOR(WATCHDOG_IOCTL_BASE, 5, int_size) WDIOC_SETTIMEOUT = IOWR(WATCHDOG_IOCTL_BASE, 6, int_size) WDIOC_GETTIMEOUT = IOR(WATCHDOG_IOCTL_BASE, 7, int_size) WDIOC_SETPRETIMEOUT = IOWR(WATCHDOG_IOCTL_BASE, 8, int_size) WDIOC_GETPRETIMEOUT = IOR(WATCHDOG_IOCTL_BASE, 9, int_size) WDIOC_GETTIMELEFT = IOR(WATCHDOG_IOCTL_BASE, 10, int_size) WDIOF_UNKNOWN = -1 # Unknown flag error WDIOS_UNKNOWN = -1 # Unknown status error WDIOF = { "OVERHEAT": 0x0001, # Reset due to CPU overheat "FANFAULT": 0x0002, # Fan failed "EXTERN1": 0x0004, # External relay 1 "EXTERN2": 0x0008, 
# External relay 2 "POWERUNDER": 0x0010, # Power bad/power fault "CARDRESET": 0x0020, # Card previously reset the CPU "POWEROVER": 0x0040, # Power over voltage "SETTIMEOUT": 0x0080, # Set timeout (in seconds) "MAGICCLOSE": 0x0100, # Supports magic close char "PRETIMEOUT": 0x0200, # Pretimeout (in seconds), get/set "ALARMONLY": 0x0400, # Watchdog triggers a management or other external alarm not a reboot "KEEPALIVEPING": 0x8000, # Keep alive ping reply } WDIOS = { "DISABLECARD": 0x0001, # Turn off the watchdog timer "ENABLECARD": 0x0002, # Turn on the watchdog timer "TEMPPANIC": 0x0004, # Kernel panic on temperature trip } # Implementation class WatchdogInfo(collections.namedtuple('WatchdogInfo', 'options,version,identity')): """Watchdog descriptor from the kernel""" def __getattr__(self, name): """Convenience has_XYZ attributes for checking WDIOF bits in options""" if name.startswith('has_') and name[4:] in WDIOF: return bool(self.options & WDIOF[name[4:]]) raise AttributeError("WatchdogInfo instance has no attribute '{0}'".format(name)) class LinuxWatchdogDevice(WatchdogBase): DEFAULT_DEVICE = '/dev/watchdog' def __init__(self, device): self.device = device self._support_cache = None self._fd = None @classmethod def from_config(cls, config): device = config.get('device', cls.DEFAULT_DEVICE) return cls(device) @property def is_running(self): return self._fd is not None @property def is_healthy(self): return os.path.exists(self.device) and os.access(self.device, os.W_OK) def open(self): try: self._fd = os.open(self.device, os.O_WRONLY) except OSError as e: raise WatchdogError("Can't open watchdog device: {0}".format(e)) def close(self): if self.is_running: try: os.write(self._fd, b'V') os.close(self._fd) self._fd = None except OSError as e: raise WatchdogError("Error while closing {0}: {1}".format(self.describe(), e)) @property def can_be_disabled(self): return self.get_support().has_MAGICCLOSE def _ioctl(self, func, arg): """Runs the specified ioctl on the underlying fd. Raises WatchdogError if the device is closed. Raises OSError or IOError (Python 2) when the ioctl fails.""" if self._fd is None: raise WatchdogError("Watchdog device is closed") if os.name != 'nt': import fcntl fcntl.ioctl(self._fd, func, arg, True) def get_support(self): if self._support_cache is None: info = watchdog_info() try: self._ioctl(WDIOC_GETSUPPORT, info) except (WatchdogError, OSError, IOError) as e: raise WatchdogError("Could not get information about watchdog device: {}".format(e)) self._support_cache = WatchdogInfo(info.options, info.firmware_version, bytearray(info.identity).decode(errors='ignore').rstrip('\x00')) return self._support_cache def describe(self): dev_str = " at {0}".format(self.device) if self.device != self.DEFAULT_DEVICE else "" ver_str = "" identity = "Linux watchdog device" if self._fd: try: _, version, identity = self.get_support() ver_str = " (firmware {0})".format(version) if version else "" except WatchdogError: pass return identity + ver_str + dev_str def keepalive(self): try: os.write(self._fd, b'1') except OSError as e: raise WatchdogError("Could not send watchdog keepalive: {0}".format(e)) def has_set_timeout(self): """Returns True if setting a timeout is supported.""" return self.get_support().has_SETTIMEOUT def set_timeout(self, timeout): timeout = int(timeout) if not 0 < timeout < 0xFFFF: raise WatchdogError("Invalid timeout {0}. 

class TestingWatchdogDevice(LinuxWatchdogDevice):  # pragma: no cover
    """Converts timeout ioctls to regular writes that can be intercepted from a named pipe."""

    timeout = 60

    def get_support(self):
        return WatchdogInfo(WDIOF['MAGICCLOSE'] | WDIOF['SETTIMEOUT'], 0, "Watchdog test harness")

    def set_timeout(self, timeout):
        buf = "Ctimeout={0}\n".format(timeout).encode('utf8')
        while len(buf):
            buf = buf[os.write(self._fd, buf):]
        self.timeout = timeout

    def get_timeout(self):
        return self.timeout
patroni-1.6.4/patronictl.py000077500000000000000000000001341361356115100157320ustar00rootroot00000000000000#!/usr/bin/env python

from patroni.ctl import ctl

if __name__ == '__main__':
    ctl(None)
patroni-1.6.4/postgres0.yml000066400000000000000000000070511361356115100156540ustar00rootroot00000000000000scope: batman
#namespace: /service/
name: postgresql0

restapi:
  listen: 127.0.0.1:8008
  connect_address: 127.0.0.1:8008
#  certfile: /etc/ssl/certs/ssl-cert-snakeoil.pem
#  keyfile: /etc/ssl/private/ssl-cert-snakeoil.key
#  authentication:
#    username: username
#    password: password

# ctl:
#   insecure: false  # Allow connections to SSL sites without certs
#   certfile: /etc/ssl/certs/ssl-cert-snakeoil.pem
#   cacert: /etc/ssl/certs/ssl-cacert-snakeoil.pem

etcd:
  #Provide host to do the initial discovery of the cluster topology:
  host: 127.0.0.1:2379
  #Or use "hosts" to provide multiple endpoints
  #Could be a comma separated string:
  #hosts: host1:port1,host2:port2
  #or an actual yaml list:
  #hosts:
  #- host1:port1
  #- host2:port2
  #Once discovery is complete Patroni will use the list of advertised clientURLs
  #It is possible to change this behavior by setting:
  #use_proxies: true

bootstrap:
  # this section will be written into Etcd:/<namespace>/<scope>/config after initializing new cluster
  # and all other cluster members will use it as a `global configuration`
  dcs:
    ttl: 30
    loop_wait: 10
    retry_timeout: 10
    maximum_lag_on_failover: 1048576
#    master_start_timeout: 300
#    synchronous_mode: false
    #standby_cluster:
      #host: 127.0.0.1
      #port: 1111
      #primary_slot_name: patroni
    postgresql:
      use_pg_rewind: true
#      use_slots: true
      parameters:
#        wal_level: hot_standby
#        hot_standby: "on"
#        wal_keep_segments: 8
#        max_wal_senders: 10
#        max_replication_slots: 10
#        wal_log_hints: "on"
#        archive_mode: "on"
#        archive_timeout: 1800s
#        archive_command: mkdir -p ../wal_archive && test ! -f ../wal_archive/%f && cp %p ../wal_archive/%f
#      recovery_conf:
#        restore_command: cp ../wal_archive/%f %p

  # some desired options for 'initdb'
  initdb:  # Note: It needs to be a list (some options need values, others are switches)
  - encoding: UTF8
  - data-checksums

  pg_hba:  # Add the following lines to pg_hba.conf after running 'initdb'
  # For kerberos gss based connectivity (discard @.*$)
  #- host replication replicator 127.0.0.1/32 gss include_realm=0
  #- host all all 0.0.0.0/0 gss include_realm=0
  - host replication replicator 127.0.0.1/32 md5
  - host all all 0.0.0.0/0 md5
#  - hostssl all all 0.0.0.0/0 md5

  # Additional script to be launched after initial cluster creation (will be passed the connection URL as parameter)
# post_init: /usr/local/bin/setup_cluster.sh

  # Some additional users that need to be created after initializing new cluster
  users:
    admin:
      password: admin
      options:
        - createrole
        - createdb

postgresql:
  listen: 127.0.0.1:5432
  connect_address: 127.0.0.1:5432
  data_dir: data/postgresql0
#  bin_dir:
#  config_dir:
  pgpass: /tmp/pgpass0
  authentication:
    replication:
      username: replicator
      password: rep-pass
    superuser:
      username: postgres
      password: zalando
    rewind:  # Has no effect on postgres 10 and lower
      username: rewind_user
      password: rewind_password
  # Server side kerberos spn
#  krbsrvname: postgres
  parameters:
    # Fully qualified kerberos ticket file for the running user
    # same as KRB5CCNAME used by the GSS
#   krb_server_keyfile: /var/spool/keytabs/postgres
    unix_socket_directories: '.'

#watchdog:
#  mode: automatic # Allowed values: off, automatic, required
#  device: /dev/watchdog
#  safety_margin: 5

tags:
    nofailover: false
    noloadbalance: false
    clonefrom: false
    nosync: false
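Editorial aside, not a file from the tarball: the postgresN.yml samples in this release are plain YAML that Patroni's config loader parses with PyYAML (already pinned in requirements.txt). A minimal sketch for inspecting one outside Patroni:

import yaml  # PyYAML, listed in requirements.txt

with open('postgres0.yml') as f:
    config = yaml.safe_load(f)
# prints: batman postgresql0 data/postgresql0
print(config['scope'], config['name'], config['postgresql']['data_dir'])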
patroni-1.6.4/postgres1.yml000066400000000000000000000064701361356115100156610ustar00rootroot00000000000000scope: batman
#namespace: /service/
name: postgresql1

restapi:
  listen: 127.0.0.1:8009
  connect_address: 127.0.0.1:8009
#  certfile: /etc/ssl/certs/ssl-cert-snakeoil.pem
#  keyfile: /etc/ssl/private/ssl-cert-snakeoil.key
#  authentication:
#    username: username
#    password: password

# ctl:
#   insecure: false  # Allow connections to SSL sites without certs
#   certfile: /etc/ssl/certs/ssl-cert-snakeoil.pem
#   cacert: /etc/ssl/certs/ssl-cacert-snakeoil.pem

etcd:
  #Provide host to do the initial discovery of the cluster topology:
  host: 127.0.0.1:2379
  #Or use "hosts" to provide multiple endpoints
  #Could be a comma separated string:
  #hosts: host1:port1,host2:port2
  #or an actual yaml list:
  #hosts:
  #- host1:port1
  #- host2:port2
  #Once discovery is complete Patroni will use the list of advertised clientURLs
  #It is possible to change this behavior by setting:
  #use_proxies: true

bootstrap:
  # this section will be written into Etcd:/<namespace>/<scope>/config after initializing new cluster
  # and all other cluster members will use it as a `global configuration`
  dcs:
    ttl: 30
    loop_wait: 10
    retry_timeout: 10
    maximum_lag_on_failover: 1048576
    postgresql:
      use_pg_rewind: true
#      use_slots: true
      parameters:
#        wal_level: hot_standby
#        hot_standby: "on"
#        wal_keep_segments: 8
#        max_wal_senders: 10
#        max_replication_slots: 10
#        wal_log_hints: "on"
#        archive_mode: "on"
#        archive_timeout: 1800s
#        archive_command: mkdir -p ../wal_archive && test ! -f ../wal_archive/%f && cp %p ../wal_archive/%f
#      recovery_conf:
#        restore_command: cp ../wal_archive/%f %p

  # some desired options for 'initdb'
  initdb:  # Note: It needs to be a list (some options need values, others are switches)
  - encoding: UTF8
  - data-checksums

  pg_hba:  # Add the following lines to pg_hba.conf after running 'initdb'
  # For kerberos gss based connectivity (discard @.*$)
  #- host replication replicator 127.0.0.1/32 gss include_realm=0
  #- host all all 0.0.0.0/0 gss include_realm=0
  - host replication replicator 127.0.0.1/32 md5
  - host all all 0.0.0.0/0 md5
#  - hostssl all all 0.0.0.0/0 md5

  # Additional script to be launched after initial cluster creation (will be passed the connection URL as parameter)
# post_init: /usr/local/bin/setup_cluster.sh

  # Some additional users that need to be created after initializing new cluster
  users:
    admin:
      password: admin
      options:
        - createrole
        - createdb

postgresql:
  listen: 127.0.0.1:5433
  connect_address: 127.0.0.1:5433
  data_dir: data/postgresql1
#  bin_dir:
#  config_dir:
  pgpass: /tmp/pgpass1
  authentication:
    replication:
      username: replicator
      password: rep-pass
    superuser:
      username: postgres
      password: zalando
    rewind:  # Has no effect on postgres 10 and lower
      username: rewind_user
      password: rewind_password
  # Server side kerberos spn
#  krbsrvname: postgres
  parameters:
    # Fully qualified kerberos ticket file for the running user
    # same as KRB5CCNAME used by the GSS
#   krb_server_keyfile: /var/spool/keytabs/postgres
    unix_socket_directories: '.'
  basebackup:
    - verbose
    - max-rate: 100M

tags:
    nofailover: false
    noloadbalance: false
    clonefrom: false
patroni-1.6.4/postgres2.yml000066400000000000000000000061721361356115100156570ustar00rootroot00000000000000scope: batman
#namespace: /service/
name: postgresql2

restapi:
  listen: 127.0.0.1:8010
  connect_address: 127.0.0.1:8010
#  certfile: /etc/ssl/certs/ssl-cert-snakeoil.pem
#  keyfile: /etc/ssl/private/ssl-cert-snakeoil.key
  authentication:
    username: username
    password: password

# ctl:
#   insecure: false  # Allow connections to SSL sites without certs
#   certfile: /etc/ssl/certs/ssl-cert-snakeoil.pem
#   cacert: /etc/ssl/certs/ssl-cacert-snakeoil.pem

etcd:
  #Provide host to do the initial discovery of the cluster topology:
  host: 127.0.0.1:2379
  #Or use "hosts" to provide multiple endpoints
  #Could be a comma separated string:
  #hosts: host1:port1,host2:port2
  #or an actual yaml list:
  #hosts:
  #- host1:port1
  #- host2:port2
  #Once discovery is complete Patroni will use the list of advertised clientURLs
  #It is possible to change this behavior by setting:
  #use_proxies: true

bootstrap:
  # this section will be written into Etcd:/<namespace>/<scope>/config after initializing new cluster
  # and all other cluster members will use it as a `global configuration`
  dcs:
    ttl: 30
    loop_wait: 10
    retry_timeout: 10
    maximum_lag_on_failover: 1048576
    postgresql:
      use_pg_rewind: true
#      use_slots: true
      parameters:
#        wal_level: hot_standby
#        hot_standby: "on"
#        wal_keep_segments: 8
#        max_wal_senders: 10
#        max_replication_slots: 10
#        wal_log_hints: "on"
#        archive_mode: "on"
#        archive_timeout: 1800s
#        archive_command: mkdir -p ../wal_archive && test ! -f ../wal_archive/%f && cp %p ../wal_archive/%f
#      recovery_conf:
#        restore_command: cp ../wal_archive/%f %p

  # some desired options for 'initdb'
  initdb:  # Note: It needs to be a list (some options need values, others are switches)
  - encoding: UTF8
  - data-checksums

  pg_hba:  # Add the following lines to pg_hba.conf after running 'initdb'
  # For kerberos gss based connectivity (discard @.*$)
  #- host replication replicator 127.0.0.1/32 gss include_realm=0
  #- host all all 0.0.0.0/0 gss include_realm=0
  - host replication replicator 127.0.0.1/32 md5
  - host all all 0.0.0.0/0 md5
#  - hostssl all all 0.0.0.0/0 md5

  # Some additional users that need to be created after initializing new cluster
  users:
    admin:
      password: admin
      options:
        - createrole
        - createdb

postgresql:
  listen: 127.0.0.1:5434
  connect_address: 127.0.0.1:5434
  data_dir: data/postgresql2
#  bin_dir:
#  config_dir:
  pgpass: /tmp/pgpass2
  authentication:
    replication:
      username: replicator
      password: rep-pass
    superuser:
      username: postgres
      password: zalando
    rewind:  # Has no effect on postgres 10 and lower
      username: rewind_user
      password: rewind_password
  # Server side kerberos spn
#  krbsrvname: postgres
  parameters:
    # Fully qualified kerberos ticket file for the running user
    # same as KRB5CCNAME used by the GSS
#   krb_server_keyfile: /var/spool/keytabs/postgres
    unix_socket_directories: '.'

tags:
    nofailover: false
    noloadbalance: false
    clonefrom: false
    replicatefrom: postgres1
patroni-1.6.4/release.sh000077500000000000000000000010411361356115100151530ustar00rootroot00000000000000#!/bin/sh
if [ $# -ne 1 ]; then
    >&2 echo "usage: $0 <version>"
    exit 1
fi

readonly VERSIONFILE="patroni/version.py"

## Bail out on any non-zero exitcode from the called processes
set -xe

python3 --version
git --version

version=$1

sed -i "s/__version__ = .*/__version__ = '${version}'/" "${VERSIONFILE}"

python3 setup.py clean
python3 setup.py test
python3 setup.py flake8

git add "${VERSIONFILE}"
git commit -m "Bumped version to $version"
git push

python3 setup.py sdist bdist_wheel upload

git tag v${version}
git push --tags
patroni-1.6.4/requirements.txt000066400000000000000000000003331361356115100164630ustar00rootroot00000000000000urllib3>=1.19.1,!=1.21
boto
PyYAML
six >= 1.7
kazoo>=1.3.1
python-etcd>=0.4.3,<0.5
python-consul>=0.7.1
click>=4.1
prettytable>=0.7
tzlocal
python-dateutil
psutil>=2.0.0
cdiff
kubernetes>=2.0.0,<=10.0.1,!=4.0.*,!=5.0.*
patroni-1.6.4/setup.py000066400000000000000000000127111361356115100147140ustar00rootroot00000000000000#!/usr/bin/env python
"""
Setup file for patroni
"""

import inspect
import os
import sys

from setuptools import Command, find_packages, setup

__location__ = os.path.join(os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe())))

NAME = 'patroni'
MAIN_PACKAGE = NAME
DESCRIPTION = 'PostgreSQL High-Available orchestrator and CLI'
LICENSE = 'The MIT License'
URL = 'https://github.com/zalando/patroni'
AUTHOR = 'Alexander Kukushkin, Dmitrii Dolgov, Oleksii Kliukin'
AUTHOR_EMAIL = 'alexander.kukushkin@zalando.de, dmitrii.dolgov@zalando.de, alexk@hintbits.com'
KEYWORDS = 'etcd governor patroni postgresql postgres ha haproxy confd' +\
           ' zookeeper exhibitor consul streaming replication kubernetes k8s'

EXTRAS_REQUIRE = {'aws': ['boto'], 'etcd': ['python-etcd'], 'consul': ['python-consul'],
                  'exhibitor': ['kazoo'], 'zookeeper': ['kazoo'], 'kubernetes': ['kubernetes']}

COVERAGE_XML = True
COVERAGE_HTML = False

# Add here all kinds of additional classifiers as defined under
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: MIT License', 'Operating System :: MacOS', 'Operating System :: POSIX :: Linux', 'Operating System :: POSIX :: BSD :: FreeBSD', 'Operating System :: Microsoft :: Windows', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: Implementation :: CPython', ] CONSOLE_SCRIPTS = ['patroni = patroni:main', 'patronictl = patroni.ctl:ctl', "patroni_wale_restore = patroni.scripts.wale_restore:main", "patroni_aws = patroni.scripts.aws:main"] class PyTest(Command): user_options = [('cov=', None, 'Run coverage'), ('cov-xml=', None, 'Generate junit xml report'), ('cov-html=', None, 'Generate junit html report')] def initialize_options(self): self.cov = [] self.cov_xml = False self.cov_html = False def finalize_options(self): if self.cov_xml or self.cov_html: self.cov = ['--cov', MAIN_PACKAGE, '--cov-report', 'term-missing'] if self.cov_xml: self.cov.extend(['--cov-report', 'xml']) if self.cov_html: self.cov.extend(['--cov-report', 'html']) def run_tests(self): try: import pytest except Exception: raise RuntimeError('py.test is not installed, run: pip install pytest') import logging silence = logging.WARNING logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=os.getenv('LOGLEVEL', silence)) args = ['--verbose', 'tests', '--doctest-modules', MAIN_PACKAGE] +\ ['-s' if logging.getLogger().getEffectiveLevel() < silence else '--capture=fd'] if self.cov: args += self.cov errno = pytest.main(args=args) sys.exit(errno) def run(self): from pkg_resources import evaluate_marker requirements = self.distribution.install_requires + ['mock>=2.0.0', 'pytest-cov', 'pytest'] +\ [v for k, v in self.distribution.extras_require.items() if not k.startswith(':') or evaluate_marker(k[1:])] self.distribution.fetch_build_eggs(requirements) self.run_tests() def read(fname): with open(os.path.join(__location__, fname)) as fd: return fd.read() def setup_package(version): # Assemble additional setup commands cmdclass = {'test': PyTest} install_requires = [] for r in read('requirements.txt').split('\n'): r = r.strip() if r == '': continue extra = False for e, v in EXTRAS_REQUIRE.items(): if r.startswith(v[0]): EXTRAS_REQUIRE[e] = [r] extra = True if not extra: install_requires.append(r) command_options = {'test': {}} if COVERAGE_XML: command_options['test']['cov_xml'] = 'setup.py', True if COVERAGE_HTML: command_options['test']['cov_html'] = 'setup.py', True setup( name=NAME, version=version, url=URL, author=AUTHOR, author_email=AUTHOR_EMAIL, description=DESCRIPTION, license=LICENSE, keywords=KEYWORDS, long_description=read('README.rst'), classifiers=CLASSIFIERS, packages=find_packages(exclude=['tests', 'tests.*']), package_data={MAIN_PACKAGE: ["*.json"]}, python_requires='>=2.7', install_requires=install_requires, extras_require=EXTRAS_REQUIRE, setup_requires='flake8', cmdclass=cmdclass, command_options=command_options, entry_points={'console_scripts': CONSOLE_SCRIPTS}, ) if __name__ == '__main__': old_modules = sys.modules.copy() try: from patroni import check_psycopg2, fatal, __version__ finally: sys.modules.clear() sys.modules.update(old_modules) if sys.version_info < (2, 7, 
0): fatal('Patroni needs to be run with Python 2.7+') check_psycopg2() setup_package(__version__) patroni-1.6.4/tests/000077500000000000000000000000001361356115100143425ustar00rootroot00000000000000patroni-1.6.4/tests/__init__.py000066400000000000000000000207201361356115100164540ustar00rootroot00000000000000import datetime import os import shutil import unittest from mock import Mock, patch import psycopg2 import urllib3 from patroni.dcs import Leader, Member from patroni.postgresql import Postgresql from patroni.postgresql.config import ConfigHandler from patroni.utils import RetryFailedError class SleepException(Exception): pass class MockResponse(object): def __init__(self, status_code=200): self.status_code = status_code self.content = '{}' @property def data(self): return self.content.encode('utf-8') @property def status(self): return self.status_code @staticmethod def getheader(*args): return '' def requests_get(url, **kwargs): members = '[{"id":14855829450254237642,"peerURLs":["http://localhost:2380","http://localhost:7001"],' +\ '"name":"default","clientURLs":["http://localhost:2379","http://localhost:4001"]}]' response = MockResponse() if url.startswith('http://local'): raise urllib3.exceptions.HTTPError() elif ':8011/patroni' in url: response.content = '{"role": "replica", "xlog": {"received_location": 0}, "tags": {}}' elif url.endswith('/members'): response.content = '[{}]' if url.startswith('http://error') else members elif url.startswith('http://exhibitor'): response.content = '{"servers":["127.0.0.1","127.0.0.2","127.0.0.3"],"port":2181}' elif url.endswith(':8011/reinitialize'): data = kwargs.get('data', '') if ' false}' in data: response.status_code = 503 response.content = 'restarting after failure already in progress' else: response.status_code = 404 return response class MockPostmaster(object): def __init__(self, is_running=True, is_single_master=False): self.is_running = Mock(return_value=is_running) self.is_single_master = Mock(return_value=is_single_master) self.wait_for_user_backends_to_close = Mock() self.signal_stop = Mock(return_value=None) self.wait = Mock() class MockCursor(object): def __init__(self, connection): self.connection = connection self.closed = False self.rowcount = 0 self.results = [] def execute(self, sql, *params): if sql.startswith('blabla'): raise psycopg2.ProgrammingError() elif sql == 'CHECKPOINT' or sql.startswith('SELECT pg_catalog.pg_create_'): raise psycopg2.OperationalError() elif sql.startswith('RetryFailedError'): raise RetryFailedError('retry') elif sql.startswith('SELECT slot_name'): self.results = [('blabla', 'physical'), ('foobar', 'physical'), ('ls', 'logical', 'a', 'b')] elif sql.startswith('SELECT CASE WHEN pg_catalog.pg_is_in_recovery()'): self.results = [(1, 2, 1)] elif sql.startswith('SELECT pg_catalog.pg_is_in_recovery()'): self.results = [(False, 2)] elif sql.startswith('SELECT pg_catalog.to_char'): replication_info = '[{"application_name":"walreceiver","client_addr":"1.2.3.4",' +\ '"state":"streaming","sync_state":"async","sync_priority":0}]' self.results = [('', 0, '', '', '', '', False, replication_info)] elif sql.startswith('SELECT name, setting'): self.results = [('wal_segment_size', '2048', '8kB', 'integer', 'internal'), ('wal_block_size', '8192', None, 'integer', 'internal'), ('shared_buffers', '16384', '8kB', 'integer', 'postmaster'), ('wal_buffers', '-1', '8kB', 'integer', 'postmaster'), ('search_path', 'public', None, 'string', 'user'), ('port', '5433', None, 'integer', 'postmaster'), ('listen_addresses', '*', None, 
'string', 'postmaster'), ('autovacuum', 'on', None, 'bool', 'sighup'), ('unix_socket_directories', '/tmp', None, 'string', 'postmaster')] elif sql.startswith('IDENTIFY_SYSTEM'): self.results = [('1', 2, '0/402EEC0', '')] elif sql.startswith('SELECT isdir, modification'): self.results = [(False, datetime.datetime.now())] elif sql.startswith('SELECT pg_catalog.pg_read_file'): self.results = [('1\t0/40159C0\tno recovery target specified\n\n' '2\t1/40159C0\tno recovery target specified\n',)] elif sql.startswith('TIMELINE_HISTORY '): self.results = [('', b'x\t0/40159C0\tno recovery target specified\n\n' b'1\t0/40159C0\tno recovery target specified\n\n' b'2\t0/402DD98\tno recovery target specified\n\n' b'3\t0/403DD98\tno recovery target specified\n')] else: self.results = [(None, None, None, None, None, None, None, None, None, None)] def fetchone(self): return self.results[0] def fetchall(self): return self.results def __iter__(self): for i in self.results: yield i def __enter__(self): return self def __exit__(self, *args): pass class MockConnect(object): server_version = 99999 autocommit = False closed = 0 def cursor(self): return MockCursor(self) def __enter__(self): return self def __exit__(self, *args): pass @staticmethod def close(): pass def psycopg2_connect(*args, **kwargs): return MockConnect() class PostgresInit(unittest.TestCase): _PARAMETERS = {'wal_level': 'hot_standby', 'max_replication_slots': 5, 'f.oo': 'bar', 'search_path': 'public', 'hot_standby': 'on', 'max_wal_senders': 5, 'wal_keep_segments': 8, 'wal_log_hints': 'on', 'max_locks_per_transaction': 64, 'max_worker_processes': 8, 'max_connections': 100, 'max_prepared_transactions': 0, 'track_commit_timestamp': 'off', 'unix_socket_directories': '/tmp', 'trigger_file': 'bla', 'stats_temp_directory': '/tmp'} @patch('psycopg2.connect', psycopg2_connect) @patch.object(ConfigHandler, 'write_postgresql_conf', Mock()) @patch.object(ConfigHandler, 'replace_pg_hba', Mock()) @patch.object(ConfigHandler, 'replace_pg_ident', Mock()) @patch.object(Postgresql, 'get_postgres_role_from_data_directory', Mock(return_value='master')) def setUp(self): data_dir = os.path.join('data', 'test0') self.p = Postgresql({'name': 'postgresql0', 'scope': 'batman', 'data_dir': data_dir, 'config_dir': data_dir, 'retry_timeout': 10, 'krbsrvname': 'postgres', 'pgpass': os.path.join(data_dir, 'pgpass0'), 'listen': '127.0.0.2, 127.0.0.3:5432', 'connect_address': '127.0.0.2:5432', 'authentication': {'superuser': {'username': 'foo', 'password': 'test'}, 'replication': {'username': '', 'password': 'rep-pass'}}, 'remove_data_directory_on_rewind_failure': True, 'use_pg_rewind': True, 'pg_ctl_timeout': 'bla', 'parameters': self._PARAMETERS, 'recovery_conf': {'foo': 'bar'}, 'pg_hba': ['host all all 0.0.0.0/0 md5'], 'pg_ident': ['krb realm postgres'], 'callbacks': {'on_start': 'true', 'on_stop': 'true', 'on_reload': 'true', 'on_restart': 'true', 'on_role_change': 'true'}}) class BaseTestPostgresql(PostgresInit): def setUp(self): super(BaseTestPostgresql, self).setUp() if not os.path.exists(self.p.data_dir): os.makedirs(self.p.data_dir) self.leadermem = Member(0, 'leader', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5435/postgres'}) self.leader = Leader(-1, 28, self.leadermem) self.other = Member(0, 'test-1', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5433/postgres', 'tags': {'replicatefrom': 'leader'}}) self.me = Member(0, 'test0', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5434/postgres'}) def tearDown(self): if 
os.path.exists(self.p.data_dir): shutil.rmtree(self.p.data_dir) patroni-1.6.4/tests/test_api.py000066400000000000000000000434651361356115100165400ustar00rootroot00000000000000import datetime import json import psycopg2 import unittest import socket from mock import Mock, PropertyMock, patch from patroni.api import RestApiHandler, RestApiServer from patroni.dcs import ClusterConfig, Member from patroni.ha import _MemberStatus from patroni.utils import tzutc from six import BytesIO as IO from six.moves import BaseHTTPServer from . import psycopg2_connect, MockCursor from .test_ha import get_cluster_initialized_without_leader future_restart_time = datetime.datetime.now(tzutc) + datetime.timedelta(days=5) postmaster_start_time = datetime.datetime.now(tzutc) class MockPostgresql(object): name = 'test' state = 'running' role = 'master' server_version = '999999' sysid = 'dummysysid' scope = 'dummy' pending_restart = True wal_name = 'wal' lsn_name = 'lsn' @staticmethod def connection(): return psycopg2_connect() @staticmethod def postmaster_start_time(): return str(postmaster_start_time) @staticmethod def replica_cached_timeline(_): return 2 class MockWatchdog(object): is_healthy = False class MockHa(object): state_handler = MockPostgresql() watchdog = MockWatchdog() @staticmethod def is_leader(): return False @staticmethod def reinitialize(_): return 'reinitialize' @staticmethod def restart(*args, **kwargs): return (True, '') @staticmethod def restart_scheduled(): return False @staticmethod def delete_future_restart(): return True @staticmethod def fetch_nodes_statuses(members): return [_MemberStatus(None, True, None, 0, None, {}, False)] @staticmethod def schedule_future_restart(data): return True @staticmethod def is_lagging(wal): return False @staticmethod def get_effective_tags(): return {'nosync': True} @staticmethod def wakeup(): pass @staticmethod def is_paused(): return True @staticmethod def is_standby_cluster(): return False class MockLogger(object): NORMAL_LOG_QUEUE_SIZE = 2 queue_size = 3 records_lost = 1 class MockPatroni(object): ha = MockHa() config = Mock() postgresql = ha.state_handler dcs = Mock() logger = MockLogger() tags = {} version = '0.00' noloadbalance = PropertyMock(return_value=False) scheduled_restart = {'schedule': future_restart_time, 'postmaster_start_time': postgresql.postmaster_start_time()} @staticmethod def sighup_handler(): pass class MockRequest(object): def __init__(self, request): self.request = request.encode('utf-8') def makefile(self, *args, **kwargs): return IO(self.request) def sendall(self, *args, **kwargs): pass class MockRestApiServer(RestApiServer): def __init__(self, Handler, request, config=None): self.socket = 0 self.serve_forever = Mock() MockRestApiServer._BaseServer__is_shut_down = Mock() MockRestApiServer._BaseServer__shutdown_request = True config = config or {'listen': '127.0.0.1:8008', 'auth': 'test:test', 'certfile': 'dumb', 'verify_client': 'a'} super(MockRestApiServer, self).__init__(MockPatroni(), config) Handler(MockRequest(request), ('0.0.0.0', 8080), self) @patch('ssl.SSLContext.load_cert_chain', Mock()) @patch('ssl.SSLContext.wrap_socket', Mock(return_value=0)) @patch.object(BaseHTTPServer.HTTPServer, '__init__', Mock()) class TestRestApiHandler(unittest.TestCase): _authorization = '\nAuthorization: Basic dGVzdDp0ZXN0' def test_do_GET(self): MockRestApiServer(RestApiHandler, 'GET /replica') MockRestApiServer(RestApiHandler, 'GET /read-only') with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={})): 
MockRestApiServer(RestApiHandler, 'GET /replica') with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={'role': 'master'})): MockRestApiServer(RestApiHandler, 'GET /replica') with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={'state': 'running'})): MockRestApiServer(RestApiHandler, 'GET /health') MockRestApiServer(RestApiHandler, 'GET /master') MockPatroni.dcs.cluster.sync.sync_standby = MockPostgresql.name MockPatroni.dcs.cluster.is_synchronous_mode = Mock(return_value=True) with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={'role': 'replica'})): MockRestApiServer(RestApiHandler, 'GET /synchronous') with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={'role': 'replica'})): MockPatroni.dcs.cluster.sync.sync_standby = '' MockRestApiServer(RestApiHandler, 'GET /asynchronous') MockPatroni.ha.is_leader = Mock(return_value=True) MockRestApiServer(RestApiHandler, 'GET /replica') MockPatroni.dcs.cluster = None with patch.object(RestApiHandler, 'get_postgresql_status', Mock(return_value={'role': 'master'})): MockRestApiServer(RestApiHandler, 'GET /master') with patch.object(MockHa, 'restart_scheduled', Mock(return_value=True)): MockRestApiServer(RestApiHandler, 'GET /master') self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /master')) with patch.object(RestApiServer, 'query', Mock(return_value=[('', 1, '', '', '', '', False, '')])): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /patroni')) with patch.object(MockHa, 'is_standby_cluster', Mock(return_value=True)): MockRestApiServer(RestApiHandler, 'GET /standby_leader') def test_do_OPTIONS(self): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'OPTIONS / HTTP/1.0')) @patch.object(MockPostgresql, 'state', PropertyMock(return_value='stopped')) def test_do_GET_patroni(self): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /patroni')) def test_basicauth(self): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'POST /restart HTTP/1.0')) MockRestApiServer(RestApiHandler, 'POST /restart HTTP/1.0\nAuthorization:') @patch.object(MockPatroni, 'dcs') def test_do_GET_cluster(self, mock_dcs): mock_dcs.cluster = get_cluster_initialized_without_leader() mock_dcs.cluster.members[1].data['xlog_location'] = 11 self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /cluster')) @patch.object(MockPatroni, 'dcs') def test_do_GET_history(self, mock_dcs): mock_dcs.cluster = get_cluster_initialized_without_leader() self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /history')) @patch.object(MockPatroni, 'dcs') def test_do_GET_config(self, mock_dcs): mock_dcs.cluster.config.data = {} self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /config')) mock_dcs.cluster.config = None self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /config')) @patch.object(MockPatroni, 'dcs') def test_do_PATCH_config(self, mock_dcs): config = {'postgresql': {'use_slots': False, 'use_pg_rewind': True, 'parameters': {'wal_level': 'logical'}}} mock_dcs.get_cluster.return_value.config = ClusterConfig.from_node(1, json.dumps(config)) request = 'PATCH /config HTTP/1.0' + self._authorization self.assertIsNotNone(MockRestApiServer(RestApiHandler, request)) request += '\nContent-Length: ' self.assertIsNotNone(MockRestApiServer(RestApiHandler, request + '34\n\n{"postgresql":{"use_slots":false}}')) config['ttl'] = 5 config['postgresql'].update({'use_slots': {'foo': True}, "parameters": None}) config = json.dumps(config) request 
+= str(len(config)) + '\n\n' + config MockRestApiServer(RestApiHandler, request) mock_dcs.set_config_value.return_value = False MockRestApiServer(RestApiHandler, request) @patch.object(MockPatroni, 'dcs') def test_do_PUT_config(self, mock_dcs): mock_dcs.get_cluster.return_value.config = ClusterConfig.from_node(1, '{}') request = 'PUT /config HTTP/1.0' + self._authorization + '\nContent-Length: ' self.assertIsNotNone(MockRestApiServer(RestApiHandler, request + '2\n\n{}')) config = '{"foo": "bar"}' request += str(len(config)) + '\n\n' + config MockRestApiServer(RestApiHandler, request) mock_dcs.set_config_value.return_value = False MockRestApiServer(RestApiHandler, request) mock_dcs.get_cluster.return_value.config = ClusterConfig.from_node(1, config) MockRestApiServer(RestApiHandler, request) @patch.object(MockPatroni, 'sighup_handler', Mock()) def test_do_POST_reload(self): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'POST /reload HTTP/1.0' + self._authorization)) @patch.object(MockPatroni, 'dcs') def test_do_POST_restart(self, mock_dcs): mock_dcs.get_cluster.return_value.is_paused.return_value = False request = 'POST /restart HTTP/1.0' + self._authorization self.assertIsNotNone(MockRestApiServer(RestApiHandler, request)) with patch.object(MockHa, 'restart', Mock(side_effect=Exception)): MockRestApiServer(RestApiHandler, request) post = request + '\nContent-Length: ' def make_request(request=None, **kwargs): request = json.dumps(kwargs) if request is None else request return '{0}{1}\n\n{2}'.format(post, len(request), request) # empty request request = make_request('') MockRestApiServer(RestApiHandler, request) # invalid request request = make_request('foobar=baz') MockRestApiServer(RestApiHandler, request) # wrong role request = make_request(schedule=future_restart_time.isoformat(), role='unknown', postgres_version='9.5.3') MockRestApiServer(RestApiHandler, request) # wrong version request = make_request(schedule=future_restart_time.isoformat(), role='master', postgres_version='9.5.3.1') MockRestApiServer(RestApiHandler, request) # unknown filter request = make_request(schedule=future_restart_time.isoformat(), batman='lives') MockRestApiServer(RestApiHandler, request) # incorrect schedule request = make_request(schedule='2016-08-42 12:45TZ+1', role='master') MockRestApiServer(RestApiHandler, request) # everything fine, but the schedule is missing request = make_request(role='master', postgres_version='9.5.2') MockRestApiServer(RestApiHandler, request) for retval in (True, False): with patch.object(MockHa, 'schedule_future_restart', Mock(return_value=retval)): request = make_request(schedule=future_restart_time.isoformat()) MockRestApiServer(RestApiHandler, request) with patch.object(MockHa, 'restart', Mock(return_value=(retval, "foo"))): request = make_request(role='master', postgres_version='9.5.2') MockRestApiServer(RestApiHandler, request) mock_dcs.get_cluster.return_value.is_paused.return_value = True MockRestApiServer(RestApiHandler, make_request(schedule='2016-08-42 12:45TZ+1', role='master')) # Valid timeout MockRestApiServer(RestApiHandler, make_request(timeout='60s')) # Invalid timeout MockRestApiServer(RestApiHandler, make_request(timeout='42towels')) def test_do_DELETE_restart(self): for retval in (True, False): with patch.object(MockHa, 'delete_future_restart', Mock(return_value=retval)): request = 'DELETE /restart HTTP/1.0' + self._authorization self.assertIsNotNone(MockRestApiServer(RestApiHandler, request)) @patch.object(MockPatroni, 'dcs') def 
test_do_POST_reinitialize(self, mock_dcs): cluster = mock_dcs.get_cluster.return_value cluster.is_paused.return_value = False request = 'POST /reinitialize HTTP/1.0' + self._authorization + '\nContent-Length: 15\n\n{"force": true}' MockRestApiServer(RestApiHandler, request) with patch.object(MockHa, 'reinitialize', Mock(return_value=None)): MockRestApiServer(RestApiHandler, request) @patch('time.sleep', Mock()) def test_RestApiServer_query(self): with patch.object(MockCursor, 'execute', Mock(side_effect=psycopg2.OperationalError)): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /patroni')) with patch.object(MockPostgresql, 'connection', Mock(side_effect=psycopg2.OperationalError)): self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /patroni')) @patch('time.sleep', Mock()) @patch.object(MockPatroni, 'dcs') def test_do_POST_switchover(self, dcs): dcs.loop_wait = 10 cluster = dcs.get_cluster.return_value cluster.is_synchronous_mode.return_value = False cluster.is_paused.return_value = False post = 'POST /switchover HTTP/1.0' + self._authorization + '\nContent-Length: ' MockRestApiServer(RestApiHandler, post + '7\n\n{"1":2}') request = post + '0\n\n' MockRestApiServer(RestApiHandler, request) cluster.leader.name = 'postgresql1' MockRestApiServer(RestApiHandler, request) request = post + '25\n\n{"leader": "postgresql1"}' cluster.is_paused.return_value = True MockRestApiServer(RestApiHandler, request) cluster.is_paused.return_value = False for cluster.is_synchronous_mode.return_value in (True, False): MockRestApiServer(RestApiHandler, request) cluster.leader.name = 'postgresql2' request = post + '53\n\n{"leader": "postgresql1", "candidate": "postgresql2"}' MockRestApiServer(RestApiHandler, request) cluster.leader.name = 'postgresql1' for cluster.is_synchronous_mode.return_value in (True, False): MockRestApiServer(RestApiHandler, request) cluster.members = [Member(0, 'postgresql0', 30, {'api_url': 'http'}), Member(0, 'postgresql2', 30, {'api_url': 'http'})] MockRestApiServer(RestApiHandler, request) cluster.failover = None MockRestApiServer(RestApiHandler, request) dcs.get_cluster.side_effect = [cluster] MockRestApiServer(RestApiHandler, request) cluster2 = cluster.copy() cluster2.leader.name = 'postgresql0' cluster2.is_unlocked.return_value = False dcs.get_cluster.side_effect = [cluster, cluster2] MockRestApiServer(RestApiHandler, request) cluster2.leader.name = 'postgresql2' dcs.get_cluster.side_effect = [cluster, cluster2] MockRestApiServer(RestApiHandler, request) dcs.get_cluster.side_effect = None dcs.manual_failover.return_value = False MockRestApiServer(RestApiHandler, request) dcs.manual_failover.return_value = True with patch.object(MockHa, 'fetch_nodes_statuses', Mock(return_value=[])): MockRestApiServer(RestApiHandler, request) # Valid future date request = post + '103\n\n{"leader": "postgresql1", "member": "postgresql2",' +\ ' "scheduled_at": "6016-02-15T18:13:30.568224+01:00"}' MockRestApiServer(RestApiHandler, request) with patch.object(MockPatroni, 'dcs') as d: d.manual_failover.return_value = False MockRestApiServer(RestApiHandler, request) # Exception: No timezone specified request = post + '97\n\n{"leader": "postgresql1", "member": "postgresql2",' +\ ' "scheduled_at": "6016-02-15T18:13:30.568224"}' MockRestApiServer(RestApiHandler, request) # Exception: Scheduled in the past request = post + '103\n\n{"leader": "postgresql1", "member": "postgresql2", "scheduled_at": "' MockRestApiServer(RestApiHandler, request + '1016-02-15T18:13:30.568224+01:00"}') # 
Invalid date self.assertIsNotNone(MockRestApiServer(RestApiHandler, request + '2010-02-29T18:13:30.568224+01:00"}')) @patch.object(MockPatroni, 'dcs', Mock()) def test_do_POST_failover(self): post = 'POST /failover HTTP/1.0' + self._authorization + '\nContent-Length: ' MockRestApiServer(RestApiHandler, post + '14\n\n{"leader":"1"}') MockRestApiServer(RestApiHandler, post + '37\n\n{"candidate":"2","scheduled_at": "1"}') @patch('ssl.SSLContext.load_cert_chain', Mock()) @patch('ssl.SSLContext.wrap_socket', Mock(return_value=0)) @patch.object(BaseHTTPServer.HTTPServer, '__init__', Mock()) class TestRestApiServer(unittest.TestCase): def test_reload_config(self): bad_config = {'listen': 'foo'} self.assertRaises(ValueError, MockRestApiServer, None, '', bad_config) srv = MockRestApiServer(Mock(), '', {'listen': '*:8008', 'certfile': 'a', 'verify_client': 'required'}) self.assertRaises(ValueError, srv.reload_config, bad_config) self.assertRaises(ValueError, srv.reload_config, {}) with patch.object(socket.socket, 'setsockopt', Mock(side_effect=socket.error)): srv.reload_config({'listen': ':8008'}) def test_check_auth(self): srv = MockRestApiServer(Mock(), '', {'listen': '*:8008', 'certfile': 'a', 'verify_client': 'required'}) mock_rh = Mock() mock_rh.request.getpeercert.return_value = None self.assertIsNot(srv.check_auth(mock_rh), True) def test_handle_error(self): try: raise Exception() except Exception: self.assertIsNone(MockRestApiServer.handle_error(None, ('127.0.0.1', 55555))) def test_socket_error(self): with patch.object(BaseHTTPServer.HTTPServer, '__init__', Mock(side_effect=socket.error)): self.assertRaises(socket.error, MockRestApiServer, Mock(), '', {'listen': '*:8008'}) patroni-1.6.4/tests/test_async_executor.py000066400000000000000000000013641361356115100210120ustar00rootroot00000000000000import unittest from mock import Mock, patch from patroni.async_executor import AsyncExecutor, CriticalTask from threading import Thread class TestAsyncExecutor(unittest.TestCase): def setUp(self): self.a = AsyncExecutor(Mock(), Mock()) @patch.object(Thread, 'start', Mock()) def test_run_async(self): self.a.run_async(Mock(return_value=True)) def test_run(self): self.a.run(Mock(side_effect=Exception())) def test_cancel(self): self.a.cancel() self.a.schedule('foo') self.a.cancel() self.a.run(Mock()) class TestCriticalTask(unittest.TestCase): def test_completed_task(self): ct = CriticalTask() ct.complete(1) self.assertFalse(ct.cancel()) patroni-1.6.4/tests/test_aws.py000066400000000000000000000037471361356115100165600ustar00rootroot00000000000000import boto.ec2 import sys import unittest import urllib3 from mock import Mock, patch from collections import namedtuple from patroni.scripts.aws import AWSConnection, main as _main class MockEc2Connection(object): @staticmethod def get_all_volumes(*args, **kwargs): oid = namedtuple('Volume', 'id') return [oid(id='a'), oid(id='b')] @staticmethod def create_tags(objects, *args, **kwargs): if len(objects) == 0: raise boto.exception.BotoServerError(503, 'Service Unavailable', 'Request limit exceeded') return True @patch('boto.ec2.connect_to_region', Mock(return_value=MockEc2Connection())) class TestAWSConnection(unittest.TestCase): @patch('patroni.scripts.aws.requests_get', Mock(return_value=urllib3.HTTPResponse( status=200, body=b'{"instanceId": "012345", "region": "eu-west-1"}'))) def setUp(self): self.conn = AWSConnection('test') def test_on_role_change(self): self.assertTrue(self.conn.on_role_change('master')) with patch.object(MockEc2Connection, 
'get_all_volumes', Mock(return_value=[])): self.conn._retry.max_tries = 1 self.assertFalse(self.conn.on_role_change('master')) @patch('patroni.scripts.aws.requests_get', Mock(side_effect=Exception('foo'))) def test_non_aws(self): conn = AWSConnection('test') self.assertFalse(conn.on_role_change("master")) @patch('patroni.scripts.aws.requests_get', Mock(return_value=urllib3.HTTPResponse(status=200, body=b'foo'))) def test_aws_bizare_response(self): conn = AWSConnection('test') self.assertFalse(conn.aws_available()) @patch('patroni.scripts.aws.requests_get', Mock(return_value=urllib3.HTTPResponse( status=200, body=b'{"instanceId": "012345", "region": "eu-west-1"}'))) @patch('sys.exit', Mock()) def test_main(self): self.assertIsNone(_main()) sys.argv = ['aws.py', 'on_start', 'replica', 'foo'] self.assertIsNone(_main()) patroni-1.6.4/tests/test_bootstrap.py000066400000000000000000000265701361356115100200020ustar00rootroot00000000000000import os from mock import Mock, PropertyMock, patch from patroni.async_executor import CriticalTask from patroni.postgresql import Postgresql from patroni.postgresql.bootstrap import Bootstrap from patroni.postgresql.cancellable import CancellableSubprocess from patroni.postgresql.config import ConfigHandler from . import psycopg2_connect, BaseTestPostgresql @patch('subprocess.call', Mock(return_value=0)) @patch('psycopg2.connect', psycopg2_connect) @patch('os.rename', Mock()) class TestBootstrap(BaseTestPostgresql): @patch('patroni.postgresql.CallbackExecutor', Mock()) def setUp(self): super(TestBootstrap, self).setUp() self.b = self.p.bootstrap @patch('time.sleep', Mock()) @patch.object(CancellableSubprocess, 'call') @patch.object(Postgresql, 'remove_data_directory', Mock(return_value=True)) @patch.object(Postgresql, 'data_directory_empty', Mock(return_value=False)) @patch.object(Bootstrap, '_post_restore', Mock(side_effect=OSError)) def test_create_replica(self, mock_cancellable_subprocess_call): self.p.config._config['create_replica_methods'] = ['pgBackRest'] self.p.config._config['pgBackRest'] = {'command': 'pgBackRest', 'keep_data': True, 'no_params': True} mock_cancellable_subprocess_call.return_value = 0 self.assertEqual(self.b.create_replica(self.leader), 0) self.p.config._config['create_replica_methods'] = ['basebackup'] self.p.config._config['basebackup'] = [{'max_rate': '100M'}, 'no-sync'] self.assertEqual(self.b.create_replica(self.leader), 0) self.p.config._config['basebackup'] = [{'max_rate': '100M', 'compress': '9'}] with patch('patroni.postgresql.bootstrap.logger.error', new_callable=Mock()) as mock_logger: self.b.create_replica(self.leader) mock_logger.assert_called_once() self.assertTrue("only one key-value is allowed and value should be a string" in mock_logger.call_args[0][0], "not matching {0}".format(mock_logger.call_args[0][0])) self.p.config._config['basebackup'] = [42] with patch('patroni.postgresql.bootstrap.logger.error', new_callable=Mock()) as mock_logger: self.b.create_replica(self.leader) mock_logger.assert_called_once() self.assertTrue("value should be string value or a single key-value pair" in mock_logger.call_args[0][0], "not matching {0}".format(mock_logger.call_args[0][0])) self.p.config._config['basebackup'] = {"foo": "bar"} self.assertEqual(self.b.create_replica(self.leader), 0) self.p.config._config['create_replica_methods'] = ['wale', 'basebackup'] del self.p.config._config['basebackup'] mock_cancellable_subprocess_call.return_value = 1 self.assertEqual(self.b.create_replica(self.leader), 1) 
mock_cancellable_subprocess_call.side_effect = Exception('foo') self.assertEqual(self.b.create_replica(self.leader), 1) mock_cancellable_subprocess_call.side_effect = [1, 0] self.assertEqual(self.b.create_replica(self.leader), 0) mock_cancellable_subprocess_call.side_effect = [Exception(), 0] self.assertEqual(self.b.create_replica(self.leader), 0) self.p.cancellable.cancel() self.assertEqual(self.b.create_replica(self.leader), 1) @patch('time.sleep', Mock()) @patch.object(CancellableSubprocess, 'call') @patch.object(Postgresql, 'remove_data_directory', Mock(return_value=True)) @patch.object(Bootstrap, '_post_restore', Mock(side_effect=OSError)) def test_create_replica_old_format(self, mock_cancellable_subprocess_call): """ The same test as before but with old 'create_replica_method' to test backward compatibility """ self.p.config._config['create_replica_method'] = ['wale', 'basebackup'] self.p.config._config['wale'] = {'command': 'foo'} mock_cancellable_subprocess_call.return_value = 0 self.assertEqual(self.b.create_replica(self.leader), 0) del self.p.config._config['wale'] self.assertEqual(self.b.create_replica(self.leader), 0) self.p.config._config['create_replica_method'] = ['wale'] mock_cancellable_subprocess_call.return_value = 1 self.assertEqual(self.b.create_replica(self.leader), 1) def test_basebackup(self): self.p.cancellable.cancel() self.b.basebackup(None, None, {'foo': 'bar'}) def test__initdb(self): self.assertRaises(Exception, self.b.bootstrap, {'initdb': [{'pgdata': 'bar'}]}) self.assertRaises(Exception, self.b.bootstrap, {'initdb': [{'foo': 'bar', 1: 2}]}) self.assertRaises(Exception, self.b.bootstrap, {'initdb': [1]}) self.assertRaises(Exception, self.b.bootstrap, {'initdb': 1}) @patch.object(CancellableSubprocess, 'call', Mock()) @patch.object(Postgresql, 'is_running', Mock(return_value=True)) @patch.object(Postgresql, 'data_directory_empty', Mock(return_value=False)) @patch.object(Postgresql, 'controldata', Mock(return_value={'max_connections setting': 100, 'max_prepared_xacts setting': 0, 'max_locks_per_xact setting': 64})) def test_bootstrap(self): with patch('subprocess.call', Mock(return_value=1)): self.assertFalse(self.b.bootstrap({})) config = {'users': {'replicator': {'password': 'rep-pass', 'options': ['replication']}}} with patch.object(Postgresql, 'is_running', Mock(return_value=False)),\ patch('multiprocessing.Process', Mock(side_effect=Exception)),\ patch('multiprocessing.get_context', Mock(side_effect=Exception), create=True): self.assertRaises(Exception, self.b.bootstrap, config) with open(os.path.join(self.p.data_dir, 'pg_hba.conf')) as f: lines = f.readlines() self.assertTrue('host all all 0.0.0.0/0 md5\n' in lines) self.p.config._config.pop('pg_hba') config.update({'post_init': '/bin/false', 'pg_hba': ['host replication replicator 127.0.0.1/32 md5', 'hostssl all all 0.0.0.0/0 md5', 'host all all 0.0.0.0/0 md5']}) self.b.bootstrap(config) with open(os.path.join(self.p.data_dir, 'pg_hba.conf')) as f: lines = f.readlines() self.assertTrue('host replication replicator 127.0.0.1/32 md5\n' in lines) @patch.object(CancellableSubprocess, 'call') @patch.object(Postgresql, 'get_major_version', Mock(return_value=90600)) @patch.object(Postgresql, 'controldata', Mock(return_value={'Database cluster state': 'in production'})) def test_custom_bootstrap(self, mock_cancellable_subprocess_call): self.p.config._config.pop('pg_hba') config = {'method': 'foo', 'foo': {'command': 'bar'}} mock_cancellable_subprocess_call.return_value = 1 
self.assertFalse(self.b.bootstrap(config)) mock_cancellable_subprocess_call.return_value = 0 with patch('multiprocessing.Process', Mock(side_effect=Exception("42"))),\ patch('multiprocessing.get_context', Mock(side_effect=Exception("42")), create=True),\ patch('os.path.isfile', Mock(return_value=True)),\ patch('os.unlink', Mock()),\ patch.object(ConfigHandler, 'save_configuration_files', Mock()),\ patch.object(ConfigHandler, 'restore_configuration_files', Mock()),\ patch.object(ConfigHandler, 'write_recovery_conf', Mock()): with self.assertRaises(Exception) as e: self.b.bootstrap(config) self.assertEqual(str(e.exception), '42') config['foo']['recovery_conf'] = {'foo': 'bar'} with self.assertRaises(Exception) as e: self.b.bootstrap(config) self.assertEqual(str(e.exception), '42') mock_cancellable_subprocess_call.side_effect = Exception self.assertFalse(self.b.bootstrap(config)) @patch('time.sleep', Mock()) @patch('os.unlink', Mock()) @patch('shutil.copy', Mock()) @patch('os.path.isfile', Mock(return_value=True)) @patch.object(Bootstrap, 'call_post_bootstrap', Mock(return_value=True)) @patch.object(Bootstrap, '_custom_bootstrap', Mock(return_value=True)) @patch.object(Postgresql, 'start', Mock(return_value=True)) @patch.object(Postgresql, 'get_major_version', Mock(return_value=110000)) def test_post_bootstrap(self): config = {'method': 'foo', 'foo': {'command': 'bar'}} self.b.bootstrap(config) task = CriticalTask() with patch.object(Bootstrap, 'create_or_update_role', Mock(side_effect=Exception)): self.b.post_bootstrap({}, task) self.assertFalse(task.result) self.p.config._config.pop('pg_hba') self.b.post_bootstrap({}, task) self.assertTrue(task.result) self.b.bootstrap(config) with patch.object(Postgresql, 'pending_restart', PropertyMock(return_value=True)), \ patch.object(Postgresql, 'restart', Mock()) as mock_restart: self.b.post_bootstrap({}, task) mock_restart.assert_called_once() self.b.bootstrap(config) self.p.set_state('stopped') self.p.reload_config({'authentication': {'superuser': {'username': 'p', 'password': 'p'}, 'replication': {'username': 'r', 'password': 'r'}, 'rewind': {'username': 'rw', 'password': 'rw'}}, 'listen': '*', 'retry_timeout': 10, 'parameters': {'wal_level': '', 'hba_file': 'foo'}}) with patch.object(Postgresql, 'major_version', PropertyMock(return_value=110000)), \ patch.object(Postgresql, 'restart', Mock()) as mock_restart: self.b.post_bootstrap({}, task) mock_restart.assert_called_once() @patch.object(CancellableSubprocess, 'call') def test_call_post_bootstrap(self, mock_cancellable_subprocess_call): mock_cancellable_subprocess_call.return_value = 1 self.assertFalse(self.b.call_post_bootstrap({'post_init': '/bin/false'})) mock_cancellable_subprocess_call.return_value = 0 self.p.config.superuser.pop('username') self.assertTrue(self.b.call_post_bootstrap({'post_init': '/bin/false'})) mock_cancellable_subprocess_call.assert_called() args, kwargs = mock_cancellable_subprocess_call.call_args self.assertTrue('PGPASSFILE' in kwargs['env']) self.assertEqual(args[0], ['/bin/false', 'dbname=postgres host=127.0.0.2 port=5432']) mock_cancellable_subprocess_call.reset_mock() self.p.config._local_address.pop('host') self.assertTrue(self.b.call_post_bootstrap({'post_init': '/bin/false'})) mock_cancellable_subprocess_call.assert_called() self.assertEqual(mock_cancellable_subprocess_call.call_args[0][0], ['/bin/false', 'dbname=postgres port=5432']) mock_cancellable_subprocess_call.side_effect = OSError self.assertFalse(self.b.call_post_bootstrap({'post_init': '/bin/false'})) 
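    # Editorial sketch, not part of the upstream suite: the recurring pattern in
    # this class is to patch CancellableSubprocess.call and steer Bootstrap down
    # its error paths via return_value/side_effect. Reduced to its essence (the
    # underscore prefix keeps unittest from collecting it):
    @patch.object(CancellableSubprocess, 'call')
    def _sketch_error_paths(self, mock_call):
        mock_call.return_value = 0           # post_init script exited cleanly
        self.assertTrue(self.b.call_post_bootstrap({'post_init': '/bin/false'}))
        mock_call.side_effect = OSError      # script could not even be spawned
        self.assertFalse(self.b.call_post_bootstrap({'post_init': '/bin/false'}))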
@patch('os.path.exists', Mock(return_value=True)) @patch('os.unlink', Mock()) @patch.object(Bootstrap, 'create_replica', Mock(return_value=0)) def test_clone(self): self.b.clone(self.leader) patroni-1.6.4/tests/test_callback_executor.py000066400000000000000000000021051361356115100214230ustar00rootroot00000000000000import psutil import unittest from mock import Mock, patch from patroni.postgresql.callback_executor import CallbackExecutor class TestCallbackExecutor(unittest.TestCase): @patch('psutil.Popen') def test_callback_executor(self, mock_popen): mock_popen.return_value.children.return_value = [] mock_popen.return_value.is_running.return_value = True ce = CallbackExecutor() ce._kill_children = Mock(side_effect=Exception) self.assertIsNone(ce.call([])) ce.join() self.assertIsNone(ce.call([])) mock_popen.return_value.kill.side_effect = psutil.AccessDenied() self.assertIsNone(ce.call([])) ce._process_children = [] mock_popen.return_value.children.side_effect = psutil.Error() mock_popen.return_value.kill.side_effect = psutil.NoSuchProcess(123) self.assertIsNone(ce.call([])) mock_popen.side_effect = Exception ce = CallbackExecutor() ce._condition.wait = Mock(side_effect=[None, Exception]) self.assertIsNone(ce.call([])) ce.join() patroni-1.6.4/tests/test_cancellable.py000066400000000000000000000022501361356115100201770ustar00rootroot00000000000000import psutil import unittest from mock import Mock, patch from patroni.exceptions import PostgresException from patroni.postgresql.cancellable import CancellableSubprocess class TestCancellableSubprocess(unittest.TestCase): def setUp(self): self.c = CancellableSubprocess() def test_call(self): self.c.cancel() self.assertRaises(PostgresException, self.c.call, communicate_input=None) def test__kill_children(self): self.c._process_children = [Mock()] self.c._kill_children() self.c._process_children[0].kill.side_effect = psutil.AccessDenied() self.c._kill_children() self.c._process_children[0].kill.side_effect = psutil.NoSuchProcess(123) self.c._kill_children() @patch('patroni.postgresql.cancellable.polling_loop', Mock(return_value=[0, 0])) def test_cancel(self): self.c._process = Mock() self.c._process.is_running.return_value = True self.c._process.children.side_effect = psutil.Error() self.c._process.suspend.side_effect = psutil.Error() self.c.cancel() self.c._process.is_running.side_effect = [True, False] self.c.cancel() patroni-1.6.4/tests/test_config.py000066400000000000000000000106751361356115100172310ustar00rootroot00000000000000import os import sys import unittest from mock import MagicMock, Mock, patch from patroni.config import Config from six.moves import builtins class TestConfig(unittest.TestCase): @patch('os.path.isfile', Mock(return_value=True)) @patch('json.load', Mock(side_effect=Exception)) @patch.object(builtins, 'open', MagicMock()) def setUp(self): sys.argv = ['patroni.py'] os.environ[Config.PATRONI_CONFIG_VARIABLE] = 'restapi: {}\npostgresql: {data_dir: foo}' self.config = Config(None) def test_set_dynamic_configuration(self): with patch.object(Config, '_build_effective_configuration', Mock(side_effect=Exception)): self.assertIsNone(self.config.set_dynamic_configuration({'foo': 'bar'})) self.assertTrue(self.config.set_dynamic_configuration({'synchronous_mode': True, 'standby_cluster': {}})) def test_reload_local_configuration(self): os.environ.update({ 'PATRONI_NAME': 'postgres0', 'PATRONI_NAMESPACE': '/patroni/', 'PATRONI_SCOPE': 'batman2', 'PATRONI_LOGLEVEL': 'ERROR', 'PATRONI_LOG_LOGGERS': 'patroni.postmaster: WARNING, urllib3: 
DEBUG', 'PATRONI_RESTAPI_USERNAME': 'username', 'PATRONI_RESTAPI_PASSWORD': 'password', 'PATRONI_RESTAPI_LISTEN': '0.0.0.0:8008', 'PATRONI_RESTAPI_CONNECT_ADDRESS': '127.0.0.1:8008', 'PATRONI_RESTAPI_CERTFILE': '/certfile', 'PATRONI_RESTAPI_KEYFILE': '/keyfile', 'PATRONI_POSTGRESQL_LISTEN': '0.0.0.0:5432', 'PATRONI_POSTGRESQL_CONNECT_ADDRESS': '127.0.0.1:5432', 'PATRONI_POSTGRESQL_DATA_DIR': 'data/postgres0', 'PATRONI_POSTGRESQL_CONFIG_DIR': 'data/postgres0', 'PATRONI_POSTGRESQL_PGPASS': '/tmp/pgpass0', 'PATRONI_ETCD_HOST': '127.0.0.1:2379', 'PATRONI_ETCD_URL': 'https://127.0.0.1:2379', 'PATRONI_ETCD_PROXY': 'http://127.0.0.1:2379', 'PATRONI_ETCD_SRV': 'test', 'PATRONI_ETCD_CACERT': '/cacert', 'PATRONI_ETCD_CERT': '/cert', 'PATRONI_ETCD_KEY': '/key', 'PATRONI_CONSUL_HOST': '127.0.0.1:8500', 'PATRONI_CONSUL_REGISTER_SERVICE': 'on', 'PATRONI_KUBERNETES_LABELS': 'a: b: c', 'PATRONI_KUBERNETES_SCOPE_LABEL': 'a', 'PATRONI_KUBERNETES_PORTS': '[{"name": "postgresql"}]', 'PATRONI_ZOOKEEPER_HOSTS': "'host1:2181','host2:2181'", 'PATRONI_EXHIBITOR_HOSTS': 'host1,host2', 'PATRONI_EXHIBITOR_PORT': '8181', 'PATRONI_foo_HOSTS': '[host1,host2', # Exception in parse_list 'PATRONI_SUPERUSER_USERNAME': 'postgres', 'PATRONI_SUPERUSER_PASSWORD': 'zalando', 'PATRONI_REPLICATION_USERNAME': 'replicator', 'PATRONI_REPLICATION_PASSWORD': 'rep-pass', 'PATRONI_admin_PASSWORD': 'admin', 'PATRONI_admin_OPTIONS': 'createrole,createdb' }) config = Config('postgres0.yml') with patch.object(Config, '_load_config_file', Mock(return_value={'restapi': {}})): with patch.object(Config, '_build_effective_configuration', Mock(side_effect=Exception)): config.reload_local_configuration() self.assertTrue(config.reload_local_configuration()) self.assertIsNone(config.reload_local_configuration()) @patch('tempfile.mkstemp', Mock(return_value=[3000, 'blabla'])) @patch('os.path.exists', Mock(return_value=True)) @patch('os.remove', Mock(side_effect=IOError)) @patch('os.close', Mock(side_effect=IOError)) @patch('shutil.move', Mock(return_value=None)) @patch('json.dump', Mock()) def test_save_cache(self): self.config.set_dynamic_configuration({'ttl': 30, 'postgresql': {'foo': 'bar'}}) with patch('os.fdopen', Mock(side_effect=IOError)): self.config.save_cache() with patch('os.fdopen', MagicMock()): self.config.save_cache() def test_standby_cluster_parameters(self): dynamic_configuration = { 'standby_cluster': { 'create_replica_methods': ['wal_e', 'basebackup'], 'host': 'localhost', 'port': 5432 } } self.config.set_dynamic_configuration(dynamic_configuration) for name, value in dynamic_configuration['standby_cluster'].items(): self.assertEqual(self.config['standby_cluster'][name], value) patroni-1.6.4/tests/test_consul.py000066400000000000000000000230511361356115100172570ustar00rootroot00000000000000import consul import unittest from consul import ConsulException, NotFound from mock import Mock, patch from patroni.dcs.consul import AbstractDCS, Cluster, Consul, ConsulInternalError, \ ConsulError, HTTPClient, InvalidSessionTTL, InvalidSession from . 

def kv_get(self, key, **kwargs):
    if key == 'service/test/members/postgresql1':
        return '1', {'Session': 'fd4f44fe-2cac-bba5-a60b-304b51ff39b7'}
    if key == 'service/test/':
        return None, None
    if key == 'service/good/leader':
        return '1', None
    if key == 'service/good/':
        return ('6429', [
            {'CreateIndex': 1334, 'Flags': 0, 'Key': key + 'failover', 'LockIndex': 0,
             'ModifyIndex': 1334, 'Value': b''},
            {'CreateIndex': 1334, 'Flags': 0, 'Key': key + 'initialize', 'LockIndex': 0,
             'ModifyIndex': 1334, 'Value': b'postgresql0'},
            {'CreateIndex': 2621, 'Flags': 0, 'Key': key + 'leader', 'LockIndex': 1, 'ModifyIndex': 2621,
             'Session': 'fd4f44fe-2cac-bba5-a60b-304b51ff39b7', 'Value': b'postgresql1'},
            {'CreateIndex': 6156, 'Flags': 0, 'Key': key + 'members/postgresql0', 'LockIndex': 1,
             'ModifyIndex': 6156, 'Session': '782e6da4-ed02-3aef-7963-99a90ed94b53',
             'Value': ('postgres://replicator:rep-pass@127.0.0.1:5432/postgres' +
                       '?application_name=http://127.0.0.1:8008/patroni').encode('utf-8')},
            {'CreateIndex': 2630, 'Flags': 0, 'Key': key + 'members/postgresql1', 'LockIndex': 1,
             'ModifyIndex': 2630, 'Session': 'fd4f44fe-2cac-bba5-a60b-304b51ff39b7',
             'Value': ('postgres://replicator:rep-pass@127.0.0.1:5433/postgres' +
                       '?application_name=http://127.0.0.1:8009/patroni').encode('utf-8')},
            {'CreateIndex': 1085, 'Flags': 0, 'Key': key + 'optime/leader', 'LockIndex': 0,
             'ModifyIndex': 6429, 'Value': b'4496294792'},
            {'CreateIndex': 1085, 'Flags': 0, 'Key': key + 'sync', 'LockIndex': 0,
             'ModifyIndex': 6429, 'Value': b'{"leader": "leader", "sync_standby": null}'}])
    raise ConsulException


class TestHTTPClient(unittest.TestCase):

    def setUp(self):
        self.client = HTTPClient('127.0.0.1', '8500', 'http', False)
        self.client.http.request = Mock()

    def test_get(self):
        self.client.get(Mock(), '')
        self.client.get(Mock(), '', {'wait': '1s', 'index': 1, 'token': 'foo'})
        self.client.http.request.return_value.status = 500
        self.client.http.request.return_value.data = b'Foo'
        self.assertRaises(ConsulInternalError, self.client.get, Mock(), '')
        self.client.http.request.return_value.data = b"Invalid Session TTL '3000000000', must be between [10s=24h0m0s]"
        self.assertRaises(InvalidSessionTTL, self.client.get, Mock(), '')
        self.client.http.request.return_value.data = b"invalid session '16492f43-c2d6-5307-432f-e32d6f7bcbd0'"
        self.assertRaises(InvalidSession, self.client.get, Mock(), '')

    def test_unknown_method(self):
        try:
            self.client.bla(Mock(), '')
            self.fail()
        except Exception as e:
            self.assertTrue(isinstance(e, AttributeError))

    def test_put(self):
        self.client.put(Mock(), '/v1/session/create')
        self.client.put(Mock(), '/v1/session/create', params=[], data='{"foo": "bar"}')


@patch.object(consul.Consul.KV, 'get', kv_get)
class TestConsul(unittest.TestCase):

    @patch.object(consul.Consul.Session, 'create', Mock(return_value='fd4f44fe-2cac-bba5-a60b-304b51ff39b7'))
    @patch.object(consul.Consul.Session, 'renew', Mock(side_effect=NotFound))
    @patch.object(consul.Consul.KV, 'get', kv_get)
    @patch.object(consul.Consul.KV, 'delete', Mock())
    def setUp(self):
        Consul({'ttl': 30, 'scope': 't', 'name': 'p', 'url': 'https://l:1', 'retry_timeout': 10,
                'verify': 'on', 'key': 'foo', 'cert': 'bar', 'cacert': 'buz', 'token': 'asd',
                'dc': 'dc1', 'register_service': True})
        Consul({'ttl': 30, 'scope': 't_', 'name': 'p', 'url': 'https://l:1', 'retry_timeout': 10,
                'verify': 'on', 'cert': 'bar', 'cacert': 'buz', 'register_service': True})
        self.c = Consul({'ttl': 30, 'scope': 'test', 'name': 'postgresql1', 'host': 'localhost:1',
                         'retry_timeout': 10, 'register_service': True})
        self.c._base_path = '/service/good'
        self.c.get_cluster()

    @patch('time.sleep', Mock(side_effect=SleepException))
    @patch.object(consul.Consul.Session, 'create', Mock(side_effect=ConsulException))
    def test_create_session(self):
        self.c._session = None
        self.assertRaises(SleepException, self.c.create_session)

    @patch.object(consul.Consul.Session, 'renew', Mock(side_effect=NotFound))
    @patch.object(consul.Consul.Session, 'create', Mock(side_effect=[InvalidSessionTTL, ConsulException]))
    @patch.object(consul.Consul.Agent, 'self', Mock(return_value={'Config': {'SessionTTLMin': 0}}))
    @patch.object(HTTPClient, 'set_ttl', Mock(side_effect=ValueError))
    def test_refresh_session(self):
        self.c._session = '1'
        self.assertFalse(self.c.refresh_session())
        self.c._last_session_refresh = 0
        self.assertRaises(ConsulError, self.c.refresh_session)

    @patch.object(consul.Consul.KV, 'delete', Mock())
    def test_get_cluster(self):
        self.c._base_path = '/service/test'
        self.assertIsInstance(self.c.get_cluster(), Cluster)
        self.assertIsInstance(self.c.get_cluster(), Cluster)
        self.c._base_path = '/service/fail'
        self.assertRaises(ConsulError, self.c.get_cluster)
        self.c._base_path = '/service/good'
        self.c._session = 'fd4f44fe-2cac-bba5-a60b-304b51ff39b8'
        self.assertIsInstance(self.c.get_cluster(), Cluster)

    @patch.object(consul.Consul.KV, 'delete', Mock(side_effect=[ConsulException, True, True, True]))
    @patch.object(consul.Consul.KV, 'put', Mock(side_effect=[True, ConsulException, InvalidSession]))
    def test_touch_member(self):
        self.c.refresh_session = Mock(return_value=False)
        self.c.touch_member({'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5433/postgres',
                             'api_url': 'http://127.0.0.1:8009/patroni'})
        self.c._register_service = True
        self.c.refresh_session = Mock(return_value=True)
        for _ in range(0, 4):
            self.c.touch_member({'balbla': 'blabla'})

    @patch.object(consul.Consul.KV, 'put', Mock(side_effect=InvalidSession))
    def test_take_leader(self):
        self.c.set_ttl(20)
        self.c.refresh_session = Mock()
        self.c.take_leader()

    @patch.object(consul.Consul.KV, 'put', Mock(return_value=True))
    def test_set_failover_value(self):
        self.c.set_failover_value('')

    @patch.object(consul.Consul.KV, 'put', Mock(return_value=True))
    def test_set_config_value(self):
        self.c.set_config_value('')

    @patch.object(consul.Consul.KV, 'put', Mock(side_effect=ConsulException))
    def test_write_leader_optime(self):
        self.c.write_leader_optime('1')

    @patch.object(consul.Consul.Session, 'renew', Mock())
    def test_update_leader(self):
        self.c.update_leader(None)

    @patch.object(consul.Consul.KV, 'delete', Mock(return_value=True))
    def test_delete_leader(self):
        self.c.delete_leader()

    @patch.object(consul.Consul.KV, 'put', Mock(return_value=True))
    def test_initialize(self):
        self.c.initialize()

    @patch.object(consul.Consul.KV, 'delete', Mock(return_value=True))
    def test_cancel_initialization(self):
        self.c.cancel_initialization()

    @patch.object(consul.Consul.KV, 'delete', Mock(return_value=True))
    def test_delete_cluster(self):
        self.c.delete_cluster()

    @patch.object(AbstractDCS, 'watch', Mock())
    def test_watch(self):
        self.c.watch(None, 1)
        self.c._name = ''
        self.c.watch(6429, 1)
        with patch.object(consul.Consul.KV, 'get', Mock(side_effect=ConsulException)):
            self.c.watch(6429, 1)

    def test_set_retry_timeout(self):
        self.c.set_retry_timeout(10)

    @patch.object(consul.Consul.KV, 'delete', Mock(return_value=True))
    @patch.object(consul.Consul.KV, 'put', Mock(return_value=True))
    def test_sync_state(self):
        self.assertTrue(self.c.set_sync_state_value('{}'))
        self.assertTrue(self.c.delete_sync_state())

    @patch.object(consul.Consul.KV, 'put', Mock(return_value=True))
    def test_set_history_value(self):
        self.assertTrue(self.c.set_history_value('{}'))

    @patch.object(consul.Consul.Agent.Service, 'register', Mock(side_effect=(False, True)))
    @patch.object(consul.Consul.Agent.Service, 'deregister', Mock(return_value=True))
    def test_update_service(self):
        d = {'role': 'replica', 'api_url': 'http://a/t', 'conn_url': 'pg://c:1', 'state': 'running'}
        self.assertIsNone(self.c.update_service({}, {}))
        self.assertFalse(self.c.update_service({}, d))
        self.assertTrue(self.c.update_service(d, d))
        self.assertIsNone(self.c.update_service(d, d))
        d['state'] = 'stopped'
        self.assertTrue(self.c.update_service(d, d, force=True))
        d['state'] = 'unknown'
        self.assertIsNone(self.c.update_service({}, d))
        d['state'] = 'running'
        d['role'] = 'bla'
        self.assertIsNone(self.c.update_service({}, d))

    def test_reload_config(self):
        self.c.reload_config({'consul': {'token': 'foo'}, 'loop_wait': 10, 'ttl': 30, 'retry_timeout': 10})

patroni-1.6.4/tests/test_ctl.py
import etcd
import os
import unittest

from click.testing import CliRunner
from datetime import datetime, timedelta
from mock import patch, Mock
from patroni.ctl import ctl, store_config, load_config, output_members, get_dcs, parse_dcs, \
    get_all_members, get_any_member, get_cursor, query_member, configure, PatroniCtlException, \
    apply_config_changes, format_config_for_editing, show_diff, invoke_editor, format_pg_version, find_executable
from patroni.dcs.etcd import Client, Failover
from patroni.utils import tzutc
from psycopg2 import OperationalError
from urllib3 import PoolManager
from . import MockConnect, MockCursor, MockResponse, psycopg2_connect
from .test_etcd import etcd_read, socket_getaddrinfo
from .test_ha import get_cluster_initialized_without_leader, get_cluster_initialized_with_leader, \
    get_cluster_initialized_with_only_leader, get_cluster_not_initialized_without_leader, get_cluster, Member

CONFIG_FILE_PATH = './test-ctl.yaml'


def test_rw_config():
    runner = CliRunner()
    with runner.isolated_filesystem():
        load_config(CONFIG_FILE_PATH + '/dummy', None)
        store_config({'etcd': {'host': 'localhost:2379'}}, CONFIG_FILE_PATH + '/dummy')
        load_config(CONFIG_FILE_PATH + '/dummy', '0.0.0.0')
        os.remove(CONFIG_FILE_PATH + '/dummy')
        os.rmdir(CONFIG_FILE_PATH)


@patch('patroni.ctl.load_config',
       Mock(return_value={'scope': 'alpha',
                          'postgresql': {'data_dir': '.', 'parameters': {}, 'retry_timeout': 5},
                          'restapi': {'listen': '::', 'certfile': 'a'},
                          'etcd': {'host': 'localhost:2379'}}))
class TestCtl(unittest.TestCase):

    @patch('socket.getaddrinfo', socket_getaddrinfo)
    def setUp(self):
        with patch.object(Client, 'machines') as mock_machines:
            mock_machines.__get__ = Mock(return_value=['http://remotehost:2379'])
            self.runner = CliRunner()
            self.e = get_dcs({'etcd': {'ttl': 30, 'host': 'ok:2379', 'retry_timeout': 10}}, 'foo')

    @patch('psycopg2.connect', psycopg2_connect)
    def test_get_cursor(self):
        self.assertIsNone(get_cursor(get_cluster_initialized_without_leader(), {}, role='master'))
        self.assertIsNotNone(get_cursor(get_cluster_initialized_with_leader(), {}, role='master'))
        # MockCursor returns pg_is_in_recovery as false
        self.assertIsNone(get_cursor(get_cluster_initialized_with_leader(), {}, role='replica'))
        self.assertIsNotNone(get_cursor(get_cluster_initialized_with_leader(), {'database': 'foo'}, role='any'))

    def test_parse_dcs(self):
        assert parse_dcs(None) is None
        assert parse_dcs('localhost') == {'etcd': {'host': 'localhost:2379'}}
        assert parse_dcs('') == {'etcd': {'host': 'localhost:2379'}}
        assert parse_dcs('localhost:8500') == {'consul': {'host': 'localhost:8500'}}
        assert parse_dcs('zookeeper://localhost') == {'zookeeper': {'hosts': ['localhost:2181']}}
        assert parse_dcs('exhibitor://dummy') == {'exhibitor': {'hosts': ['dummy'], 'port': 8181}}
        assert parse_dcs('consul://localhost') == {'consul': {'host': 'localhost:8500'}}
        self.assertRaises(PatroniCtlException, parse_dcs, 'invalid://test')

    def test_output_members(self):
        scheduled_at = datetime.now(tzutc) + timedelta(seconds=600)
        cluster = get_cluster_initialized_with_leader(Failover(1, 'foo', 'bar', scheduled_at))
        self.assertIsNone(output_members(cluster, name='abc', fmt='pretty'))
        self.assertIsNone(output_members(cluster, name='abc', fmt='json'))
        self.assertIsNone(output_members(cluster, name='abc', fmt='yaml'))
        self.assertIsNone(output_members(cluster, name='abc', fmt='tsv'))

    @patch('patroni.ctl.get_dcs')
    @patch.object(PoolManager, 'request', Mock(return_value=MockResponse()))
    def test_switchover(self, mock_get_dcs):
        mock_get_dcs.return_value = self.e
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader
        mock_get_dcs.return_value.set_failover_value = Mock()

        result = self.runner.invoke(ctl, ['switchover', 'dummy'], input='leader\nother\n\ny')
        assert 'leader' in result.output

        result = self.runner.invoke(ctl, ['switchover', 'dummy'], input='leader\nother\n2300-01-01T12:23:00\ny')
        assert result.exit_code == 0

        with patch('patroni.dcs.Cluster.is_paused', Mock(return_value=True)):
            result = self.runner.invoke(ctl, ['switchover', 'dummy', '--force',
                                              '--scheduled', '2015-01-01T12:00:00'])
            assert result.exit_code == 1

        # Aborting switchover, as we answer NO to the confirmation
        result = self.runner.invoke(ctl, ['switchover', 'dummy'], input='leader\nother\n\nN')
        assert result.exit_code == 1

        # Aborting scheduled switchover, as we answer NO to the confirmation
        result = self.runner.invoke(ctl, ['switchover', 'dummy', '--scheduled', '2015-01-01T12:00:00+01:00'],
                                    input='leader\nother\n\nN')
        assert result.exit_code == 1

        # Target and source are equal
        result = self.runner.invoke(ctl, ['switchover', 'dummy'], input='leader\nleader\n\ny')
        assert result.exit_code == 1

        # Reality is not part of this cluster
        result = self.runner.invoke(ctl, ['switchover', 'dummy'], input='leader\nReality\n\ny')
        assert result.exit_code == 1

        result = self.runner.invoke(ctl, ['switchover', 'dummy', '--force'])
        assert 'Member' in result.output

        result = self.runner.invoke(ctl, ['switchover', 'dummy', '--force',
                                          '--scheduled', '2015-01-01T12:00:00+01:00'])
        assert result.exit_code == 0

        # Invalid timestamp
        result = self.runner.invoke(ctl, ['switchover', 'dummy', '--force', '--scheduled', 'invalid'])
        assert result.exit_code != 0

        # Invalid timestamp
        result = self.runner.invoke(ctl, ['switchover', 'dummy', '--force',
                                          '--scheduled', '2115-02-30T12:00:00+01:00'])
        assert result.exit_code != 0

        # Specifying wrong leader
        result = self.runner.invoke(ctl, ['switchover', 'dummy'], input='dummy')
        assert result.exit_code == 1

        with patch.object(PoolManager, 'request', Mock(side_effect=Exception)):
            # Non-responding patroni
            result = self.runner.invoke(ctl, ['switchover', 'dummy'], input='leader\nother\n2300-01-01T12:23:00\ny')
            assert 'falling back to DCS' in result.output

        with patch.object(PoolManager, 'request') as mocked:
            mocked.return_value.status = 500
            result = self.runner.invoke(ctl, ['switchover', 'dummy'], input='leader\nother\n\ny')
            assert 'Switchover failed' in result.output

            mocked.return_value.status = 501
            mocked.return_value.data = b'Server does not support this operation'
            result = self.runner.invoke(ctl, ['switchover', 'dummy'], input='leader\nother\n\ny')
            assert 'Switchover failed' in result.output

        # No members available
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_only_leader
        result = self.runner.invoke(ctl, ['switchover', 'dummy'], input='leader\nother\n\ny')
        assert result.exit_code == 1

        # No master available
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_without_leader
        result = self.runner.invoke(ctl, ['switchover', 'dummy'], input='leader\nother\n\ny')
        assert result.exit_code == 1

    @patch('patroni.ctl.get_dcs')
    @patch.object(PoolManager, 'request', Mock(return_value=MockResponse()))
    def test_failover(self, mock_get_dcs):
        mock_get_dcs.return_value = self.e
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader
        mock_get_dcs.return_value.set_failover_value = Mock()
        result = self.runner.invoke(ctl, ['failover', 'dummy'], input='\n')
        assert 'Failover could be performed only to a specific candidate' in result.output

    @patch('patroni.dcs.dcs_modules', Mock(return_value=['patroni.dcs.dummy', 'patroni.dcs.etcd']))
    def test_get_dcs(self):
        self.assertRaises(PatroniCtlException, get_dcs, {'dummy': {}}, 'dummy')

    @patch('psycopg2.connect', psycopg2_connect)
    @patch('patroni.ctl.query_member', Mock(return_value=([['mock column']], None)))
    @patch('patroni.ctl.get_dcs')
    @patch.object(etcd.Client, 'read', etcd_read)
    def test_query(self, mock_get_dcs):
        mock_get_dcs.return_value = self.e
        # Mutually exclusive
        result = self.runner.invoke(ctl, ['query', 'alpha', '--member', 'abc', '--role', 'master'])
        assert result.exit_code == 1

        with self.runner.isolated_filesystem():
            with open('dummy', 'w') as dummy_file:
                dummy_file.write('SELECT 1')

            # Mutually exclusive
            result = self.runner.invoke(ctl, ['query', 'alpha', '--file', 'dummy', '--command', 'dummy'])
            assert result.exit_code == 1

            result = self.runner.invoke(ctl, ['query', 'alpha', '--file', 'dummy'])
            assert result.exit_code == 0

            os.remove('dummy')

        result = self.runner.invoke(ctl, ['query', 'alpha', '--command', 'SELECT 1'])
        assert 'mock column' in result.output

        # --command or --file is mandatory
        result = self.runner.invoke(ctl, ['query', 'alpha'])
        assert result.exit_code == 1

        result = self.runner.invoke(ctl, ['query', 'alpha', '--command', 'SELECT 1', '--username', 'root',
                                          '--password', '--dbname', 'postgres'], input='ab\nab')
        assert 'mock column' in result.output

    def test_query_member(self):
        with patch('patroni.ctl.get_cursor', Mock(return_value=MockConnect().cursor())):
            rows = query_member(None, None, None, 'master', 'SELECT pg_catalog.pg_is_in_recovery()', {})
            self.assertTrue('False' in str(rows))

            rows = query_member(None, None, None, 'replica', 'SELECT pg_catalog.pg_is_in_recovery()', {})
            self.assertEqual(rows, (None, None))

            with patch.object(MockCursor, 'execute', Mock(side_effect=OperationalError('bla'))):
                rows = query_member(None, None, None, 'replica', 'SELECT pg_catalog.pg_is_in_recovery()', {})

        with patch('patroni.ctl.get_cursor', Mock(return_value=None)):
            rows = query_member(None, None, None, None, 'SELECT pg_catalog.pg_is_in_recovery()', {})
            self.assertTrue('No connection to' in str(rows))

            rows = query_member(None, None, None, 'replica', 'SELECT pg_catalog.pg_is_in_recovery()', {})
            self.assertTrue('No connection to' in str(rows))

        with patch('patroni.ctl.get_cursor', Mock(side_effect=OperationalError('bla'))):
            rows = query_member(None, None, None, 'replica', 'SELECT pg_catalog.pg_is_in_recovery()', {})

    @patch('patroni.ctl.get_dcs')
    def test_dsn(self, mock_get_dcs):
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader
        result = self.runner.invoke(ctl, ['dsn', 'alpha'])
        assert 'host=127.0.0.1 port=5435' in result.output

        # Mutually exclusive options
        result = self.runner.invoke(ctl, ['dsn', 'alpha', '--role', 'master', '--member', 'dummy'])
        assert result.exit_code == 1

        # Non-existing member
        result = self.runner.invoke(ctl, ['dsn', 'alpha', '--member', 'dummy'])
        assert result.exit_code == 1

    @patch.object(PoolManager, 'request')
    @patch('patroni.ctl.get_dcs')
    def test_reload(self, mock_get_dcs, mock_post):
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader

        result = self.runner.invoke(ctl, ['reload', 'alpha'], input='y')
        assert 'Failed: reload for member' in result.output

        mock_post.return_value.status = 200
        result = self.runner.invoke(ctl, ['reload', 'alpha'], input='y')
        assert 'No changes to apply on member' in result.output

        mock_post.return_value.status = 202
        result = self.runner.invoke(ctl, ['reload', 'alpha'], input='y')
        assert 'Reload request received for member' in result.output

    @patch.object(PoolManager, 'request')
    @patch('patroni.ctl.get_dcs')
    def test_restart_reinit(self, mock_get_dcs, mock_post):
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader
        mock_post.return_value.status = 503

        result = self.runner.invoke(ctl, ['restart', 'alpha'], input='now\ny\n')
        assert 'Failed: restart for' in result.output
        assert result.exit_code == 0

        result = self.runner.invoke(ctl, ['reinit', 'alpha'], input='y')
        assert result.exit_code == 1

        # successful reinit
        result = self.runner.invoke(ctl, ['reinit', 'alpha', 'other'], input='y\ny')
        assert result.exit_code == 0

        # Aborted restart
        result = self.runner.invoke(ctl, ['restart', 'alpha'], input='now\nN')
        assert result.exit_code == 1

        result = self.runner.invoke(ctl, ['restart', 'alpha', '--pending', '--force'])
        assert result.exit_code == 0

        # Aborted scheduled restart
        result = self.runner.invoke(ctl, ['restart', 'alpha', '--scheduled', '2019-10-01T14:30'], input='N')
        assert result.exit_code == 1

        # Not a member
        result = self.runner.invoke(ctl, ['restart', 'alpha', 'dummy', '--any'], input='now\ny')
        assert result.exit_code == 1

        # Wrong pg version
        result = self.runner.invoke(ctl, ['restart', 'alpha', '--any', '--pg-version', '9.1'], input='now\ny')
        assert 'Error: Invalid PostgreSQL version format' in result.output
        assert result.exit_code == 1

        result = self.runner.invoke(ctl, ['restart', 'alpha', '--pending', '--force', '--timeout', '10min'])
        assert result.exit_code == 0

        # normal restart, the schedule is actually parsed, but not validated in patronictl
        result = self.runner.invoke(ctl, ['restart', 'alpha', 'other', '--force', '--scheduled', '2300-10-01T14:30'])
        assert 'Failed: flush scheduled restart' in result.output

        with patch('patroni.dcs.Cluster.is_paused', Mock(return_value=True)):
            result = self.runner.invoke(ctl, ['restart', 'alpha', 'other', '--force',
                                              '--scheduled', '2300-10-01T14:30'])
            assert result.exit_code == 1

        # force restart with restart already present
        result = self.runner.invoke(ctl, ['restart', 'alpha', 'other', '--force', '--scheduled', '2300-10-01T14:30'])
        assert result.exit_code == 0

        ctl_args = ['restart', 'alpha', '--pg-version', '99.0', '--scheduled', '2300-10-01T14:30']
        # normal restart, the schedule is actually parsed, but not validated in patronictl
        mock_post.return_value.status = 200
        result = self.runner.invoke(ctl, ctl_args, input='y')
        assert result.exit_code == 0

        # get restart with the non-200 return code
        # normal restart, the schedule is actually parsed, but not validated in patronictl
        mock_post.return_value.status = 204
        result = self.runner.invoke(ctl, ctl_args, input='y')
        assert result.exit_code == 0

        # get restart with the non-200 return code
        # normal restart, the schedule is actually parsed, but not validated in patronictl
        mock_post.return_value.status = 202
        result = self.runner.invoke(ctl, ctl_args, input='y')
        assert 'Success: restart scheduled' in result.output
        assert result.exit_code == 0

        # get restart with the non-200 return code
        # normal restart, the schedule is actually parsed, but not validated in patronictl
        mock_post.return_value.status = 409
        result = self.runner.invoke(ctl, ctl_args, input='y')
        assert 'Failed: another restart is already' in result.output
        assert result.exit_code == 0

    @patch('patroni.ctl.get_dcs')
    def test_remove(self, mock_get_dcs):
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader

        result = self.runner.invoke(ctl, ['-k', 'remove', 'alpha'], input='alpha\nslave')
        assert 'Please confirm' in result.output
        assert 'You are about to remove all' in result.output
        # Not typing an exact confirmation
        assert result.exit_code == 1

        # master specified does not match master of cluster
        result = self.runner.invoke(ctl, ['remove', 'alpha'], input='alpha\nYes I am aware\nslave')
        assert result.exit_code == 1

        # cluster specified on cmdline does not match verification prompt
        result = self.runner.invoke(ctl, ['remove', 'alpha'], input='beta\nleader')
        assert result.exit_code == 1

        result = self.runner.invoke(ctl, ['remove', 'alpha'], input='alpha\nYes I am aware\nleader')
        assert result.exit_code == 0

    def test_ctl(self):
        self.runner.invoke(ctl, ['list'])

        result = self.runner.invoke(ctl, ['--help'])
        assert 'Usage:' in result.output

    def test_get_any_member(self):
        self.assertIsNone(get_any_member(get_cluster_initialized_without_leader(), role='master'))

        m = get_any_member(get_cluster_initialized_with_leader(), role='master')
        self.assertEqual(m.name, 'leader')

    def test_get_all_members(self):
        self.assertEqual(list(get_all_members(get_cluster_initialized_without_leader(), role='master')), [])

        r = list(get_all_members(get_cluster_initialized_with_leader(), role='master'))
        self.assertEqual(len(r), 1)
        self.assertEqual(r[0].name, 'leader')

        r = list(get_all_members(get_cluster_initialized_with_leader(), role='replica'))
        self.assertEqual(len(r), 1)
        self.assertEqual(r[0].name, 'other')

        self.assertEqual(len(list(get_all_members(get_cluster_initialized_without_leader(), role='replica'))), 2)

    @patch('patroni.ctl.get_dcs')
    def test_members(self, mock_get_dcs):
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader
        result = self.runner.invoke(ctl, ['list'])
        assert '127.0.0.1' in result.output
        assert result.exit_code == 0

        with patch('patroni.ctl.load_config', Mock(return_value={})):
            self.runner.invoke(ctl, ['list'])

    def test_configure(self):
        result = self.runner.invoke(configure, ['--dcs', 'abc', '-c', 'dummy', '-n', 'bla'])
        assert result.exit_code == 0

    @patch('patroni.ctl.get_dcs')
    def test_scaffold(self, mock_get_dcs):
        mock_get_dcs.return_value = self.e
        mock_get_dcs.return_value.get_cluster = get_cluster_not_initialized_without_leader
        mock_get_dcs.return_value.initialize = Mock(return_value=True)
        mock_get_dcs.return_value.touch_member = Mock(return_value=True)
        mock_get_dcs.return_value.attempt_to_acquire_leader = Mock(return_value=True)
        mock_get_dcs.return_value.delete_cluster = Mock()

        with patch.object(self.e, 'initialize', return_value=False):
            result = self.runner.invoke(ctl, ['scaffold', 'alpha'])
            assert result.exception

        with patch.object(mock_get_dcs.return_value, 'touch_member', Mock(return_value=False)):
            result = self.runner.invoke(ctl, ['scaffold', 'alpha'])
            assert result.exception

        result = self.runner.invoke(ctl, ['scaffold', 'alpha'])
        assert result.exit_code == 0

        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader
        result = self.runner.invoke(ctl, ['scaffold', 'alpha'])
        assert result.exception

    @patch('patroni.ctl.get_dcs')
    def test_list_extended(self, mock_get_dcs):
        mock_get_dcs.return_value = self.e
        cluster = get_cluster_initialized_with_leader(sync=('leader', 'other'))
        mock_get_dcs.return_value.get_cluster = Mock(return_value=cluster)

        result = self.runner.invoke(ctl, ['list', 'dummy', '--extended', '--timestamp'])
        assert '2100' in result.output
        assert 'Scheduled restart' in result.output

    @patch('patroni.ctl.get_dcs')
    @patch.object(PoolManager, 'request', Mock(return_value=MockResponse()))
    def test_flush(self, mock_get_dcs):
        mock_get_dcs.return_value = self.e
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader

        result = self.runner.invoke(ctl, ['flush', 'dummy', 'restart', '-r', 'master'], input='y')
        assert 'No scheduled restart' in result.output

        result = self.runner.invoke(ctl, ['flush', 'dummy', 'restart', '--force'])
        assert 'Success: flush scheduled restart' in result.output

        with patch.object(PoolManager, 'request', return_value=MockResponse(404)):
            result = self.runner.invoke(ctl, ['flush', 'dummy', 'restart', '--force'])
            assert 'Failed: flush scheduled restart' in result.output

    @patch.object(PoolManager, 'request')
    @patch('patroni.ctl.get_dcs')
    @patch('patroni.ctl.polling_loop', Mock(return_value=[1]))
    def test_pause_cluster(self, mock_get_dcs, mock_post):
        mock_get_dcs.return_value = self.e
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader

        mock_post.return_value.status = 500
        result = self.runner.invoke(ctl, ['pause', 'dummy'])
        assert 'Failed' in result.output

        mock_post.return_value.status = 200
        with patch('patroni.dcs.Cluster.is_paused', Mock(return_value=True)):
            result = self.runner.invoke(ctl, ['pause', 'dummy'])
            assert 'Cluster is already paused' in result.output

        result = self.runner.invoke(ctl, ['pause', 'dummy', '--wait'])
        assert "'pause' request sent" in result.output

        mock_get_dcs.return_value.get_cluster = Mock(side_effect=[get_cluster_initialized_with_leader(),
                                                                  get_cluster(None, None, [], None, None)])
        self.runner.invoke(ctl, ['pause', 'dummy', '--wait'])

        member = Member(1, 'other', 28, {})
        mock_get_dcs.return_value.get_cluster = Mock(side_effect=[get_cluster_initialized_with_leader(),
                                                                  get_cluster(None, None, [member], None, None)])
        self.runner.invoke(ctl, ['pause', 'dummy', '--wait'])

    @patch.object(PoolManager, 'request')
    @patch('patroni.ctl.get_dcs')
    def test_resume_cluster(self, mock_get_dcs, mock_post):
        mock_get_dcs.return_value = self.e
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader

        mock_post.return_value.status = 200
        with patch('patroni.dcs.Cluster.is_paused', Mock(return_value=False)):
            result = self.runner.invoke(ctl, ['resume', 'dummy'])
            assert 'Cluster is not paused' in result.output

        with patch('patroni.dcs.Cluster.is_paused', Mock(return_value=True)):
            result = self.runner.invoke(ctl, ['resume', 'dummy'])
            assert 'Success' in result.output

            mock_post.return_value.status = 500
            result = self.runner.invoke(ctl, ['resume', 'dummy'])
            assert 'Failed' in result.output

            mock_post.side_effect = Exception
            result = self.runner.invoke(ctl, ['resume', 'dummy'])
            assert 'Can not find accessible cluster member' in result.output

    def test_apply_config_changes(self):
        config = {"postgresql": {"parameters": {"work_mem": "4MB"}, "use_pg_rewind": True}, "ttl": 30}

        before_editing = format_config_for_editing(config)

        # Spaces are allowed and stripped, numbers and booleans are interpreted
        after_editing, changed_config = apply_config_changes(before_editing, config,
                                                             ["postgresql.parameters.work_mem = 5MB", "ttl=15",
                                                              "postgresql.use_pg_rewind=off", 'a.b=c'])
        self.assertEqual(changed_config, {"a": {"b": "c"},
                                          "postgresql": {"parameters": {"work_mem": "5MB"},
                                                         "use_pg_rewind": False},
                                          "ttl": 15})

        # postgresql.parameters namespace is flattened
        after_editing, changed_config = apply_config_changes(before_editing, config,
                                                             ["postgresql.parameters.work_mem.sub = x"])
        self.assertEqual(changed_config, {"postgresql": {"parameters": {"work_mem": "4MB", "work_mem.sub": "x"},
                                                         "use_pg_rewind": True},
                                          "ttl": 30})

        # Setting to null deletes
        after_editing, changed_config = apply_config_changes(before_editing, config,
                                                             ["postgresql.parameters.work_mem=null"])
        self.assertEqual(changed_config, {"postgresql": {"use_pg_rewind": True}, "ttl": 30})

        after_editing, changed_config = apply_config_changes(before_editing, config,
                                                             ["postgresql.use_pg_rewind=null",
                                                              "postgresql.parameters.work_mem=null"])
        self.assertEqual(changed_config, {"ttl": 30})

        self.assertRaises(PatroniCtlException, apply_config_changes, before_editing, config, ['a'])

    @patch('sys.stdout.isatty', return_value=False)
    @patch('cdiff.markup_to_pager')
    def test_show_diff(self, mock_markup_to_pager, mock_isatty):
        show_diff("foo:\n bar: 1\n", "foo:\n bar: 2\n")
        mock_markup_to_pager.assert_not_called()

        mock_isatty.return_value = True
        show_diff("foo:\n bar: 1\n", "foo:\n bar: 2\n")
        mock_markup_to_pager.assert_called_once()

        # Test that unicode handling doesn't fail with an exception
        show_diff(b"foo:\n bar: \xc3\xb6\xc3\xb6\n".decode('utf-8'),
                  b"foo:\n bar: \xc3\xbc\xc3\xbc\n".decode('utf-8'))

    @patch('subprocess.call', return_value=1)
    def test_invoke_editor(self, mock_subprocess_call):
        os.environ.pop('EDITOR', None)
        for e in ('', '/bin/vi'):
            with patch('patroni.ctl.find_executable', Mock(return_value=e)):
                self.assertRaises(PatroniCtlException, invoke_editor, 'foo: bar\n', 'test')

    @patch('patroni.ctl.get_dcs')
    def test_show_config(self, mock_get_dcs):
        mock_get_dcs.return_value = self.e
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader
        self.runner.invoke(ctl, ['show-config', 'dummy'])

    @patch('patroni.ctl.get_dcs')
    def test_edit_config(self, mock_get_dcs):
        mock_get_dcs.return_value = self.e
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader
        mock_get_dcs.return_value.set_config_value = Mock(return_value=False)

        os.environ['EDITOR'] = 'true'
        self.runner.invoke(ctl, ['edit-config', 'dummy'])
        self.runner.invoke(ctl, ['edit-config', 'dummy', '-s', 'foo=bar'])
        self.runner.invoke(ctl, ['edit-config', 'dummy', '--replace', 'postgres0.yml'])
        self.runner.invoke(ctl, ['edit-config', 'dummy', '--apply', '-'], input='foo: bar')
        self.runner.invoke(ctl, ['edit-config', 'dummy', '--force', '--apply', '-'], input='foo: bar')

        mock_get_dcs.return_value.set_config_value.return_value = True
        self.runner.invoke(ctl, ['edit-config', 'dummy', '--force', '--apply', '-'], input='foo: bar')

    @patch('patroni.ctl.get_dcs')
    def test_version(self, mock_get_dcs):
        mock_get_dcs.return_value = self.e
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader

        with patch.object(PoolManager, 'request') as mocked:
            result = self.runner.invoke(ctl, ['version'])
            assert 'patronictl version' in result.output

            mocked.return_value.data = b'{"patroni":{"version":"1.2.3"},"server_version": 100001}'
            result = self.runner.invoke(ctl, ['version', 'dummy'])
            assert '1.2.3' in result.output

        with patch.object(PoolManager, 'request', Mock(side_effect=Exception)):
            result = self.runner.invoke(ctl, ['version', 'dummy'])
            assert 'failed to get version' in result.output

    @patch('patroni.ctl.get_dcs')
    def test_history(self, mock_get_dcs):
        mock_get_dcs.return_value.get_cluster = Mock()
        mock_get_dcs.return_value.get_cluster.return_value.history.lines = [[1, 67176,
                                                                             'no recovery target specified']]
        result = self.runner.invoke(ctl, ['history'])
        assert 'Reason' in result.output

    def test_format_pg_version(self):
        self.assertEqual(format_pg_version(100001), '10.1')
        self.assertEqual(format_pg_version(90605), '9.6.5')

    @patch('sys.platform', 'win32')
    def test_find_executable(self):
        with patch('os.path.isfile', Mock(return_value=True)):
            self.assertEqual(find_executable('vim'), 'vim.exe')
        with patch('os.path.isfile', Mock(return_value=False)):
            self.assertIsNone(find_executable('vim'))
        with patch('os.path.isfile', Mock(side_effect=[False, True])):
            self.assertEqual(find_executable('vim', '/'), '/vim.exe')

    @patch('patroni.ctl.get_dcs')
    def test_get_members(self, mock_get_dcs):
        mock_get_dcs.return_value = self.e
        mock_get_dcs.return_value.get_cluster = get_cluster_not_initialized_without_leader
        result = self.runner.invoke(ctl, ['reinit', 'dummy'])
        assert "cluster doesn\'t have any members" in result.output

    @patch('time.sleep', Mock())
    @patch('patroni.ctl.get_dcs')
    def test_reinit_wait(self, mock_get_dcs):
        mock_get_dcs.return_value.get_cluster = get_cluster_initialized_with_leader
        with patch.object(PoolManager, 'request') as mocked:
            mocked.side_effect = [Mock(data=s, status=200) for s in
                                  [b"reinitialize", b'{"state":"creating replica"}', b'{"state":"running"}']]
            result = self.runner.invoke(ctl, ['reinit', 'alpha', 'other', '--wait'], input='y\ny')
        self.assertIn("Waiting for reinitialize to complete on: other", result.output)
        self.assertIn("Reinitialize is completed on: other", result.output)

patroni-1.6.4/tests/test_etcd.py
import etcd
import urllib3.util.connection
import socket
import unittest

from dns.exception import DNSException
from mock import Mock, patch
from patroni.dcs.etcd import AbstractDCS, Client, Cluster, Etcd, EtcdError, DnsCachingResolver
from patroni.exceptions import DCSError
from patroni.utils import Retry
from urllib3.exceptions import ReadTimeoutError
from . import SleepException, MockResponse, requests_get
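
# The helpers below emulate just enough of python-etcd to drive these tests:
# etcd_watch maps specific timeout values to the possible outcomes of
# Client.watch (timed out, a result, an exception, event index cleared),
# etcd_write only succeeds for the expected leader keys, and etcd_read returns
# a canned /service/batman5 cluster tree (config, failover, initialize,
# leader, optime, sync and two members).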

def etcd_watch(self, key, index=None, timeout=None, recursive=None):
    if timeout == 2.0:
        raise etcd.EtcdWatchTimedOut
    elif timeout == 5.0:
        return etcd.EtcdResult('delete', {})
    elif 5 < timeout <= 10.0:
        raise etcd.EtcdException
    elif timeout == 20.0:
        raise etcd.EtcdEventIndexCleared


def etcd_write(self, key, value, **kwargs):
    if key == '/service/exists/leader':
        raise etcd.EtcdAlreadyExist
    if key in ['/service/test/leader', '/patroni/test/leader'] and \
            (kwargs.get('prevValue') == 'foo' or not kwargs.get('prevExist', True)):
        return True
    raise etcd.EtcdException


def etcd_read(self, key, **kwargs):
    if key == '/service/noleader/':
        raise DCSError('noleader')
    elif key == '/service/nocluster/':
        raise etcd.EtcdKeyNotFound

    response = {"action": "get", "node": {"key": "/service/batman5", "dir": True, "nodes": [
        {"key": "/service/batman5/config", "value": '{"synchronous_mode": 0}',
         "modifiedIndex": 1582, "createdIndex": 1582},
        {"key": "/service/batman5/failover", "value": "", "modifiedIndex": 1582, "createdIndex": 1582},
        {"key": "/service/batman5/initialize", "value": "postgresql0", "modifiedIndex": 1582, "createdIndex": 1582},
        {"key": "/service/batman5/leader", "value": "postgresql1",
         "expiration": "2015-05-15T09:11:00.037397538Z", "ttl": 21,
         "modifiedIndex": 20728, "createdIndex": 20434},
        {"key": "/service/batman5/optime", "dir": True, "nodes": [
            {"key": "/service/batman5/optime/leader", "value": "2164261704",
             "modifiedIndex": 20729, "createdIndex": 20729}],
         "modifiedIndex": 20437, "createdIndex": 20437},
        {"key": "/service/batman5/sync", "value": '{"leader": "leader"}',
         "modifiedIndex": 1582, "createdIndex": 1582},
        {"key": "/service/batman5/members", "dir": True, "nodes": [
            {"key": "/service/batman5/members/postgresql1",
             "value": "postgres://replicator:rep-pass@127.0.0.1:5434/postgres" +
                      "?application_name=http://127.0.0.1:8009/patroni",
             "expiration": "2015-05-15T09:10:59.949384522Z", "ttl": 21,
             "modifiedIndex": 20727, "createdIndex": 20727},
            {"key": "/service/batman5/members/postgresql0",
             "value": "postgres://replicator:rep-pass@127.0.0.1:5433/postgres" +
                      "?application_name=http://127.0.0.1:8008/patroni",
             "expiration": "2015-05-15T09:11:09.611860899Z", "ttl": 30,
             "modifiedIndex": 20730, "createdIndex": 20730}],
         "modifiedIndex": 1581, "createdIndex": 1581}],
        "modifiedIndex": 1581, "createdIndex": 1581}}
    result = etcd.EtcdResult(**response)
    result.etcd_index = 0
    return result


def dns_query(name, _):
    if '-server' not in name or '-ssl' in name:
        return []
    if name == '_etcd-server._tcp.blabla':
        return []
    elif name == '_etcd-server._tcp.exception':
        raise DNSException()
    srv = Mock()
    srv.port = 2380
    srv.target.to_text.return_value = 'localhost' if name == '_etcd-server._tcp.foobar' else '127.0.0.1'
    return [srv]


def socket_getaddrinfo(*args):
    if args[0] in ('ok', 'localhost', '127.0.0.1'):
        return [(socket.AF_INET, 1, 6, '', ('127.0.0.1', 0)), (socket.AF_INET6, 1, 6, '', ('::1', 0))]
    raise socket.gaierror


def http_request(method, url, **kwargs):
    if url == 'http://localhost:2379/timeout':
        raise ReadTimeoutError(None, None, None)
    ret = MockResponse()
    if url == 'http://localhost:2379/v2/machines':
        ret.content = 'http://localhost:2379,http://localhost:4001'
    elif url == 'http://localhost:4001/v2/machines':
        ret.content = ''
    elif url != 'http://localhost:2379/':
        raise socket.error
    return ret


class TestDnsCachingResolver(unittest.TestCase):

    @patch('time.sleep', Mock(side_effect=SleepException))
    @patch('socket.getaddrinfo', Mock(side_effect=socket.gaierror))
    def test_run(self):
        r = DnsCachingResolver()
        self.assertIsNone(r.resolve_async('', 0))
        r.join()


@patch('dns.resolver.query', dns_query)
@patch('socket.getaddrinfo', socket_getaddrinfo)
@patch('patroni.dcs.etcd.requests_get', requests_get)
class TestClient(unittest.TestCase):

    @patch('dns.resolver.query', dns_query)
    @patch('socket.getaddrinfo', socket_getaddrinfo)
    @patch('patroni.dcs.etcd.requests_get', requests_get)
    def setUp(self):
        with patch.object(Client, 'machines') as mock_machines:
            mock_machines.__get__ = Mock(return_value=['http://localhost:2379', 'http://localhost:4001'])
            self.client = Client({'srv': 'test', 'retry_timeout': 3}, DnsCachingResolver())
            self.client.http.request = http_request
            self.client.http.request_encode_body = http_request

    def test_machines(self):
        self.client._base_uri = 'http://localhost:4001'
        self.client._machines_cache = ['http://localhost:2379']
        self.assertIsNotNone(self.client.machines)
        self.client._base_uri = 'http://localhost:4001'
        self.client._machines_cache = []
        self.assertIsNotNone(self.client.machines)
        self.client._update_machines_cache = True
        machines = None
        try:
            machines = self.client.machines
            self.fail()
        except Exception:
            self.assertIsNone(machines)

    @patch.object(Client, 'machines')
    def test_api_execute(self, mock_machines):
        mock_machines.__get__ = Mock(return_value=['http://localhost:2379'])
        self.assertRaises(ValueError, self.client.api_execute, '', '')
        self.client._base_uri = 'http://localhost:4001'
        self.client._machines_cache = ['http://localhost:2379']
        self.assertRaises(etcd.EtcdException, self.client.api_execute, '/', 'POST', timeout=0)
        self.client._base_uri = 'http://localhost:4001'
        self.client._machines_cache = ['http://localhost:2379']
        rtry = Retry(deadline=10, max_delay=1, max_tries=-1, retry_exceptions=(etcd.EtcdLeaderElectionInProgress,))
        rtry(self.client.api_execute, '/', 'POST', timeout=0, params={'retry': rtry})
        mock_machines.__get__ = Mock(return_value=['http://localhost:2379'])
        self.client._machines_cache_updated = 0
        self.client.api_execute('/', 'POST', timeout=0)
        self.client._machines_cache = [self.client._base_uri]
        self.assertRaises(etcd.EtcdWatchTimedOut, self.client.api_execute,
                          '/timeout', 'POST', params={'wait': 'true'})
        self.assertRaises(etcd.EtcdWatchTimedOut, self.client.api_execute,
                          '/timeout', 'POST', params={'wait': 'true'})
        self.assertRaises(etcd.EtcdException, self.client.api_execute, '/', '')
        with patch.object(Client, '_do_http_request', Mock(side_effect=etcd.EtcdConnectionFailed)):
            with patch.object(Client, '_calculate_timeouts', Mock(side_effect=[(1, 1, 0), (1, 1, 0), (0, 1, 0)])):
                self.assertRaises(etcd.EtcdException, rtry, self.client.api_execute,
                                  '/', 'GET', params={'retry': rtry})
        self.client._read_timeout = 0
        self.assertRaises(etcd.EtcdException, self.client.api_execute, '/', 'GET')

    def test_get_srv_record(self):
        self.assertEqual(self.client.get_srv_record('_etcd-server._tcp.blabla'), [])
        self.assertEqual(self.client.get_srv_record('_etcd-server._tcp.exception'), [])

    def test__get_machines_cache_from_srv(self):
        self.client._get_machines_cache_from_srv('foobar')
        self.client.get_srv_record = Mock(return_value=[('localhost', 2380)])
        self.client._get_machines_cache_from_srv('blabla')

    def test__get_machines_cache_from_dns(self):
        self.client._get_machines_cache_from_dns('error', 2379)

    @patch.object(Client, 'machines')
    def test__load_machines_cache(self, mock_machines):
        mock_machines.__get__ = Mock(return_value=['http://localhost:2379'])
        self.client._config = {}
        self.assertRaises(Exception, self.client._load_machines_cache)
        self.client._config = {'srv': 'blabla'}
        self.assertRaises(etcd.EtcdException, self.client._load_machines_cache)

    @patch.object(socket.socket, 'connect')
    def test_create_connection_patched(self, mock_connect):
        self.assertRaises(socket.error, urllib3.util.connection.create_connection, ('fail', 2379))
        urllib3.util.connection.create_connection(('[localhost]', 2379))
        mock_connect.side_effect = socket.error
        self.assertRaises(socket.error, urllib3.util.connection.create_connection, ('[localhost]', 2379),
                          timeout=1, source_address=('localhost', 53333),
                          socket_options=[(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)])


@patch('patroni.dcs.etcd.requests_get', requests_get)
@patch('socket.getaddrinfo', socket_getaddrinfo)
@patch.object(etcd.Client, 'write', etcd_write)
@patch.object(etcd.Client, 'read', etcd_read)
@patch.object(etcd.Client, 'delete', Mock(side_effect=etcd.EtcdException))
class TestEtcd(unittest.TestCase):

    @patch('socket.getaddrinfo', socket_getaddrinfo)
    def setUp(self):
        with patch.object(Client, 'machines') as mock_machines:
            mock_machines.__get__ = Mock(return_value=['http://localhost:2379', 'http://localhost:4001'])
            self.etcd = Etcd({'namespace': '/patroni/', 'ttl': 30, 'retry_timeout': 10,
                              'host': 'localhost:2379', 'scope': 'test', 'name': 'foo'})

    def test_base_path(self):
        self.assertEqual(self.etcd._base_path, '/patroni/test')

    @patch('dns.resolver.query', dns_query)
    def test_get_etcd_client(self):
        with patch('time.sleep', Mock(side_effect=SleepException)),\
                patch.object(Client, 'machines') as mock_machines:
            mock_machines.__get__ = Mock(side_effect=etcd.EtcdException)
            self.assertRaises(SleepException, self.etcd.get_etcd_client,
                              {'discovery_srv': 'test', 'retry_timeout': 10, 'cacert': '1', 'key': '1', 'cert': 1})
            self.assertRaises(SleepException, self.etcd.get_etcd_client,
                              {'url': 'https://test:2379', 'retry_timeout': 10})
            self.assertRaises(SleepException, self.etcd.get_etcd_client,
                              {'hosts': 'foo:4001,bar', 'retry_timeout': 10})
            mock_machines.__get__ = Mock(return_value=[])
            self.assertRaises(SleepException, self.etcd.get_etcd_client,
                              {'proxy': 'https://user:password@test:2379', 'retry_timeout': 10})

    def test_get_cluster(self):
        cluster = self.etcd.get_cluster()
        self.assertIsInstance(cluster, Cluster)
        self.assertFalse(cluster.is_synchronous_mode())
        self.etcd._base_path = '/service/nocluster'
        cluster = self.etcd.get_cluster()
        self.assertIsInstance(cluster, Cluster)
        self.assertIsNone(cluster.leader)
        self.etcd._base_path = '/service/noleader'
        self.assertRaises(EtcdError, self.etcd.get_cluster)

    def test_touch_member(self):
        self.assertFalse(self.etcd.touch_member('', ''))

    def test_take_leader(self):
        self.assertFalse(self.etcd.take_leader())

    def test_attempt_to_acquire_leader(self):
        self.etcd._base_path = '/service/exists'
        self.assertFalse(self.etcd.attempt_to_acquire_leader())
        self.etcd._base_path = '/service/failed'
        self.assertFalse(self.etcd.attempt_to_acquire_leader())

    def test_write_leader_optime(self):
        self.etcd.write_leader_optime('0')

    def test_update_leader(self):
        self.assertTrue(self.etcd.update_leader(None))

    def test_initialize(self):
        self.assertFalse(self.etcd.initialize())

    def test_cancel_initialization(self):
        self.assertFalse(self.etcd.cancel_initialization())

    def test_delete_leader(self):
        self.assertFalse(self.etcd.delete_leader())

    def test_delete_cluster(self):
        self.assertFalse(self.etcd.delete_cluster())

    @patch('time.sleep', Mock(side_effect=SleepException))
    @patch.object(etcd.Client, 'watch', etcd_watch)
    def test_watch(self):
        self.etcd.watch(None, 0)
        self.etcd.get_cluster()
        self.etcd.watch(20729, 1.5)
        self.etcd.watch(20729, 4.5)
        with patch.object(AbstractDCS, 'watch', Mock()):
            self.assertTrue(self.etcd.watch(20729, 19.5))
        self.assertRaises(SleepException, self.etcd.watch, 20729, 9.5)

    def test_other_exceptions(self):
        self.etcd.retry = Mock(side_effect=AttributeError('foo'))
        self.assertRaises(EtcdError, self.etcd.cancel_initialization)

    def test_set_ttl(self):
        self.etcd.set_ttl(20)
        self.assertTrue(self.etcd.watch(None, 1))

    def test_sync_state(self):
        self.assertFalse(self.etcd.write_sync_state('leader', None))
        self.assertFalse(self.etcd.delete_sync_state())

    def test_set_history_value(self):
        self.assertFalse(self.etcd.set_history_value('{}'))

patroni-1.6.4/tests/test_exhibitor.py
import unittest
import urllib3

from mock import Mock, patch
from patroni.dcs.exhibitor import ExhibitorEnsembleProvider, Exhibitor
from patroni.dcs.zookeeper import ZooKeeperError
from . import SleepException, requests_get
from .test_zookeeper import MockKazooClient


@patch('patroni.dcs.exhibitor.requests_get', requests_get)
@patch('time.sleep', Mock(side_effect=SleepException))
class TestExhibitorEnsembleProvider(unittest.TestCase):

    def test_init(self):
        self.assertRaises(SleepException, ExhibitorEnsembleProvider, ['localhost'], 8181)

    def test_poll(self):
        self.assertFalse(ExhibitorEnsembleProvider(['exhibitor'], 8181).poll())


class TestExhibitor(unittest.TestCase):

    @patch('urllib3.PoolManager.request',
           Mock(return_value=urllib3.HTTPResponse(
               status=200, body=b'{"servers":["127.0.0.1","127.0.0.2","127.0.0.3"],"port":2181}')))
    @patch('patroni.dcs.zookeeper.KazooClient', MockKazooClient)
    def setUp(self):
        self.e = Exhibitor({'hosts': ['localhost', 'exhibitor'], 'port': 8181,
                            'scope': 'test', 'name': 'foo', 'ttl': 30, 'retry_timeout': 10})

    @patch.object(ExhibitorEnsembleProvider, 'poll', Mock(return_value=True))
    def test_get_cluster(self):
        self.assertRaises(ZooKeeperError, self.e.get_cluster)

patroni-1.6.4/tests/test_ha.py
import datetime
import etcd
import os
import sys

from mock import Mock, MagicMock, PropertyMock, patch
from patroni.config import Config
from patroni.dcs import Cluster, ClusterConfig, Failover, Leader, Member, get_dcs, SyncState, TimelineHistory
from patroni.dcs.etcd import Client
from patroni.exceptions import DCSError, PostgresConnectionException, PatroniException
from patroni.ha import Ha, _MemberStatus
from patroni.postgresql import Postgresql
from patroni.postgresql.bootstrap import Bootstrap
from patroni.postgresql.cancellable import CancellableSubprocess
from patroni.postgresql.config import ConfigHandler
from patroni.postgresql.rewind import Rewind
from patroni.postgresql.slots import SlotsHandler
from patroni.utils import tzutc
from patroni.watchdog import Watchdog
from . import PostgresInit, MockPostmaster, psycopg2_connect, requests_get
from .test_etcd import socket_getaddrinfo, etcd_read, etcd_write
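
# The get_cluster_* factory functions below build in-memory Cluster objects in
# various states (not initialized, without leader, with leader, standby) that
# are shared by the HA tests in this module and reused by tests/test_ctl.py
# through imports.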

SYSID = '12345678901'


def true(*args, **kwargs):
    return True


def false(*args, **kwargs):
    return False


def get_cluster(initialize, leader, members, failover, sync, cluster_config=None):
    t = datetime.datetime.now().isoformat()
    history = TimelineHistory(1, '[[1,67197376,"no recovery target specified","' + t + '"]]',
                              [(1, 67197376, 'no recovery target specified', t)])
    cluster_config = cluster_config or ClusterConfig(1, {'check_timeline': True}, 1)
    return Cluster(initialize, cluster_config, leader, 10, members, failover, sync, history)


def get_cluster_not_initialized_without_leader(cluster_config=None):
    return get_cluster(None, None, [], None, SyncState(None, None, None), cluster_config)


def get_cluster_initialized_without_leader(leader=False, failover=None, sync=None, cluster_config=None):
    m1 = Member(0, 'leader', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5435/postgres',
                                  'api_url': 'http://127.0.0.1:8008/patroni', 'xlog_location': 4})
    leader = Leader(0, 0, m1) if leader else None
    m2 = Member(0, 'other', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5436/postgres',
                                 'api_url': 'http://127.0.0.1:8011/patroni', 'state': 'running',
                                 'pause': True, 'tags': {'clonefrom': True},
                                 'scheduled_restart': {'schedule': "2100-01-01 10:53:07.560445+00:00",
                                                       'postgres_version': '99.0.0'}})
    syncstate = SyncState(0 if sync else None, sync and sync[0], sync and sync[1])
    return get_cluster(SYSID, leader, [m1, m2], failover, syncstate, cluster_config)


def get_cluster_initialized_with_leader(failover=None, sync=None):
    return get_cluster_initialized_without_leader(leader=True, failover=failover, sync=sync)


def get_cluster_initialized_with_only_leader(failover=None, cluster_config=None):
    leader = get_cluster_initialized_without_leader(leader=True, failover=failover).leader
    return get_cluster(True, leader, [leader], failover, None, cluster_config)


def get_standby_cluster_initialized_with_only_leader(failover=None, sync=None):
    return get_cluster_initialized_with_only_leader(
        cluster_config=ClusterConfig(1, {
            "standby_cluster": {
                "host": "localhost",
                "port": 5432,
                "primary_slot_name": "",
            }}, 1)
    )


def get_node_status(reachable=True, in_recovery=True, timeline=2,
                    wal_position=10, nofailover=False, watchdog_failed=False):
    def fetch_node_status(e):
        tags = {}
        if nofailover:
            tags['nofailover'] = True
        return _MemberStatus(e, reachable, in_recovery, timeline, wal_position, tags, watchdog_failed)
    return fetch_node_status


future_restart_time = datetime.datetime.now(tzutc) + datetime.timedelta(days=5)
postmaster_start_time = datetime.datetime.now(tzutc)


class MockPatroni(object):

    def __init__(self, p, d):
        os.environ[Config.PATRONI_CONFIG_VARIABLE] = """
restapi:
  listen: 0.0.0.0:8008
bootstrap:
  users:
    replicator:
      password: rep-pass
      options:
        - replication
postgresql:
  name: foo
  data_dir: data/postgresql0
  pg_rewind:
    username: postgres
    password: postgres
watchdog:
  mode: off
zookeeper:
  exhibitor:
    hosts: [localhost]
    port: 8181
"""
        # We rely on sys.argv in Config, so it's necessary to reset
        # all the extra values that are coming from py.test
        sys.argv = sys.argv[:1]
        self.config = Config(None)
        self.config.set_dynamic_configuration({'maximum_lag_on_failover': 5})
        self.version = '1.5.7'
        self.postgresql = p
        self.dcs = d
        self.api = Mock()
        self.tags = {'foo': 'bar'}
        self.nofailover = None
        self.replicatefrom = None
        self.api.connection_string = 'http://127.0.0.1:8008'
        self.clonefrom = None
        self.nosync = False
        self.scheduled_restart = {'schedule': future_restart_time,
                                  'postmaster_start_time': str(postmaster_start_time)}
        self.watchdog = Watchdog(self.config)
        self.request = lambda member, **kwargs: requests_get(member.api_url, **kwargs)


def run_async(self, func, args=()):
    self.reset_scheduled_action()
    if args:
        func(*args)
    else:
        func()


@patch.object(Postgresql, 'is_running', Mock(return_value=MockPostmaster()))
@patch.object(Postgresql, 'is_leader', Mock(return_value=True))
@patch.object(Postgresql, 'timeline_wal_position', Mock(return_value=(1, 10, 1)))
@patch.object(Postgresql, '_cluster_info_state_get', Mock(return_value=3))
@patch.object(Postgresql, 'call_nowait', Mock(return_value=True))
@patch.object(Postgresql, 'data_directory_empty', Mock(return_value=False))
@patch.object(Postgresql, 'controldata', Mock(return_value={'Database system identifier': SYSID}))
@patch.object(SlotsHandler, 'sync_replication_slots', Mock())
@patch.object(ConfigHandler, 'append_pg_hba', Mock())
@patch.object(ConfigHandler, 'write_pgpass', Mock(return_value={}))
@patch.object(ConfigHandler, 'write_recovery_conf', Mock())
@patch.object(ConfigHandler, 'write_postgresql_conf', Mock())
@patch.object(Postgresql, 'query', Mock())
@patch.object(Postgresql, 'checkpoint', Mock())
@patch.object(CancellableSubprocess, 'call', Mock(return_value=0))
@patch.object(Postgresql, 'get_local_timeline_lsn_from_replication_connection', Mock(return_value=[2, 10]))
@patch.object(Postgresql, 'get_master_timeline', Mock(return_value=2))
@patch.object(ConfigHandler, 'restore_configuration_files', Mock())
@patch.object(etcd.Client, 'write', etcd_write)
@patch.object(etcd.Client, 'read', etcd_read)
@patch.object(etcd.Client, 'delete', Mock(side_effect=etcd.EtcdException))
@patch('patroni.postgresql.polling_loop', Mock(return_value=range(1)))
@patch('patroni.async_executor.AsyncExecutor.busy', PropertyMock(return_value=False))
@patch('patroni.async_executor.AsyncExecutor.run_async', run_async)
@patch('subprocess.call', Mock(return_value=0))
@patch('time.sleep', Mock())
class TestHa(PostgresInit):

    @patch('socket.getaddrinfo', socket_getaddrinfo)
    @patch('patroni.dcs.dcs_modules', Mock(return_value=['patroni.dcs.etcd']))
    @patch.object(etcd.Client, 'read', etcd_read)
    def setUp(self):
        super(TestHa, self).setUp()
        with patch.object(Client, 'machines') as mock_machines:
            mock_machines.__get__ = Mock(return_value=['http://remotehost:2379'])
            self.p.set_state('running')
            self.p.set_role('replica')
            self.p.postmaster_start_time = MagicMock(return_value=str(postmaster_start_time))
            self.p.can_create_replica_without_replication_connection = MagicMock(return_value=False)
            self.e = get_dcs({'etcd': {'ttl': 30, 'host': 'ok:2379', 'scope': 'test',
                                       'name': 'foo', 'retry_timeout': 10}})
            self.ha = Ha(MockPatroni(self.p, self.e))
            self.ha.old_cluster = self.e.get_cluster()
            self.ha.cluster = get_cluster_initialized_without_leader()
            self.ha.load_cluster_from_dcs = Mock()

    def test_update_lock(self):
        self.p.last_operation = Mock(side_effect=PostgresConnectionException(''))
        self.assertTrue(self.ha.update_lock(True))

    def test_touch_member(self):
        self.p.timeline_wal_position = Mock(return_value=(0, 1, 0))
        self.p.replica_cached_timeline = Mock(side_effect=Exception)
        self.ha.touch_member()
        self.p.timeline_wal_position = Mock(return_value=(0, 1, 1))
        self.p.set_role('standby_leader')
        self.ha.touch_member()

    def test_is_leader(self):
        self.assertFalse(self.ha.is_leader())

    def test_start_as_replica(self):
        self.p.is_healthy = false
        self.assertEqual(self.ha.run_cycle(), 'starting as a secondary')

    @patch('patroni.dcs.etcd.Etcd.initialize', return_value=True)
    def test_bootstrap_as_standby_leader(self, initialize):
        self.p.data_directory_empty = true
        self.ha.cluster = get_cluster_not_initialized_without_leader(cluster_config=ClusterConfig(0, {}, 0))
        self.ha.cluster.is_unlocked = true
        self.ha.patroni.config._dynamic_configuration = {"standby_cluster": {"port": 5432}}
        self.assertEqual(self.ha.run_cycle(), 'trying to bootstrap a new standby leader')

    def test_bootstrap_waiting_for_standby_leader(self):
        self.p.data_directory_empty = true
        self.ha.cluster = get_cluster_initialized_without_leader()
        self.ha.cluster.config.data.update({'standby_cluster': {'port': 5432}})
        self.assertEqual(self.ha.run_cycle(), 'waiting for standby_leader to bootstrap')

    @patch.object(Cluster, 'get_clone_member',
                  Mock(return_value=Member(0, 'test', 1, {'api_url': 'http://127.0.0.1:8011/patroni',
                                                          'conn_url': 'postgres://127.0.0.1:5432/postgres'})))
    @patch.object(Bootstrap, 'create_replica', Mock(return_value=0))
    def test_start_as_cascade_replica_in_standby_cluster(self):
        self.p.data_directory_empty = true
        self.ha.cluster = get_standby_cluster_initialized_with_only_leader()
        self.ha.cluster.is_unlocked = false
        self.assertEqual(self.ha.run_cycle(), "trying to bootstrap from replica 'test'")

    def test_recover_replica_failed(self):
        self.p.controldata = lambda: {'Database cluster state': 'in recovery', 'Database system identifier': SYSID}
        self.p.is_running = false
        self.p.follow = false
        self.assertEqual(self.ha.run_cycle(), 'starting as a secondary')
        self.assertEqual(self.ha.run_cycle(), 'failed to start postgres')

    def test_recover_former_master(self):
        self.p.follow = false
        self.p.is_running = false
        self.p.name = 'leader'
        self.p.set_role('master')
        self.p.controldata = lambda: {'Database cluster state': 'shut down', 'Database system identifier': SYSID}
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.assertEqual(self.ha.run_cycle(), 'starting as readonly because i had the session lock')

    @patch.object(Postgresql, 'fix_cluster_state', Mock())
    def test_crash_recovery(self):
        self.p.is_running = false
        self.p.controldata = lambda: {'Database cluster state': 'in production', 'Database system identifier': SYSID}
        self.assertEqual(self.ha.run_cycle(), 'doing crash recovery in a single user mode')

    @patch.object(Rewind, 'rewind_or_reinitialize_needed_and_possible', Mock(return_value=True))
    @patch.object(Rewind, 'can_rewind', PropertyMock(return_value=True))
    def test_recover_with_rewind(self):
        self.p.is_running = false
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.assertEqual(self.ha.run_cycle(), 'running pg_rewind from leader')

    @patch.object(Rewind, 'rewind_or_reinitialize_needed_and_possible', Mock(return_value=True))
    @patch.object(Bootstrap, 'create_replica', Mock(return_value=1))
    def test_recover_with_reinitialize(self):
        self.p.is_running = false
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.assertEqual(self.ha.run_cycle(), 'reinitializing due to diverged timelines')

    @patch('sys.exit', return_value=1)
    @patch('patroni.ha.Ha.sysid_valid', MagicMock(return_value=True))
    def test_sysid_no_match(self, exit_mock):
        self.p.controldata = lambda: {'Database cluster state': 'in recovery', 'Database system identifier': '123'}
        self.ha.run_cycle()
        exit_mock.assert_called_once_with(1)

    @patch.object(Cluster, 'is_unlocked', Mock(return_value=False))
    def test_start_as_readonly(self):
        self.p.is_leader = false
        self.p.is_healthy = true
        self.ha.has_lock = true
        self.p.controldata = lambda: {'Database cluster state': 'in production', 'Database system identifier': SYSID}
        self.assertEqual(self.ha.run_cycle(), 'promoted self to leader because i had the session lock')

    @patch('psycopg2.connect', psycopg2_connect)
    def test_acquire_lock_as_master(self):
        self.assertEqual(self.ha.run_cycle(), 'acquired session lock as a leader')

    def test_promoted_by_acquiring_lock(self):
        self.ha.is_healthiest_node = true
        self.p.is_leader = false
        self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')

    def test_long_promote(self):
        self.ha.cluster.is_unlocked = false
        self.ha.has_lock = true
        self.p.is_leader = false
        self.p.set_role('master')
        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')

    def test_demote_after_failing_to_obtain_lock(self):
        self.ha.acquire_lock = false
        self.assertEqual(self.ha.run_cycle(), 'demoted self after trying and failing to obtain lock')

    def test_follow_new_leader_after_failing_to_obtain_lock(self):
        self.ha.is_healthiest_node = true
        self.ha.acquire_lock = false
        self.p.is_leader = false
        self.assertEqual(self.ha.run_cycle(), 'following new leader after trying and failing to obtain lock')

    def test_demote_because_not_healthiest(self):
        self.ha.is_healthiest_node = false
        self.assertEqual(self.ha.run_cycle(), 'demoting self because i am not the healthiest node')

    def test_follow_new_leader_because_not_healthiest(self):
        self.ha.is_healthiest_node = false
        self.p.is_leader = false
        self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node')

    def test_promote_because_have_lock(self):
        self.ha.cluster.is_unlocked = false
        self.ha.has_lock = true
        self.p.is_leader = false
        self.assertEqual(self.ha.run_cycle(), 'promoted self to leader because i had the session lock')

    def test_promote_without_watchdog(self):
        self.ha.cluster.is_unlocked = false
        self.ha.has_lock = true
        self.p.is_leader = true
        with patch.object(Watchdog, 'activate', Mock(return_value=False)):
            self.assertEqual(self.ha.run_cycle(), 'Demoting self because watchdog could not be activated')
            self.p.is_leader = false
            self.assertEqual(self.ha.run_cycle(), 'Not promoting self because watchdog could not be activated')

    def test_leader_with_lock(self):
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.ha.cluster.is_unlocked = false
        self.ha.has_lock = true
        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')

    def test_demote_because_not_having_lock(self):
        self.ha.cluster.is_unlocked = false
        with patch.object(Watchdog, 'is_running', PropertyMock(return_value=True)):
            self.assertEqual(self.ha.run_cycle(), 'demoting self because i do not have the lock and i was a leader')

    def test_demote_because_update_lock_failed(self):
        self.ha.cluster.is_unlocked = false
        self.ha.has_lock = true
        self.ha.update_lock = false
        self.assertEqual(self.ha.run_cycle(), 'demoted self because failed to update leader lock in DCS')
        self.p.is_leader = false
        self.assertEqual(self.ha.run_cycle(), 'not promoting because failed to update leader lock in DCS')

    def test_follow(self):
        self.ha.cluster.is_unlocked = false
        self.p.is_leader = false
        self.assertEqual(self.ha.run_cycle(), 'no action. i am a secondary and i am following a leader')
        self.ha.patroni.replicatefrom = "foo"
        self.p.config.check_recovery_conf = Mock(return_value=(True, False))
        self.assertEqual(self.ha.run_cycle(), 'no action. i am a secondary and i am following a leader')

    def test_follow_in_pause(self):
        self.ha.cluster.is_unlocked = false
        self.ha.is_paused = true
        self.assertEqual(self.ha.run_cycle(), 'PAUSE: continue to run as master without lock')
        self.p.is_leader = false
        self.assertEqual(self.ha.run_cycle(), 'PAUSE: no action')

    @patch.object(Rewind, 'rewind_or_reinitialize_needed_and_possible', Mock(return_value=True))
    @patch.object(Rewind, 'can_rewind', PropertyMock(return_value=True))
    def test_follow_triggers_rewind(self):
        self.p.is_leader = false
        self.ha._rewind.trigger_check_diverged_lsn()
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.assertEqual(self.ha.run_cycle(), 'running pg_rewind from leader')

    def test_no_etcd_connection_master_demote(self):
        self.ha.load_cluster_from_dcs = Mock(side_effect=DCSError('Etcd is not responding properly'))
        self.assertEqual(self.ha.run_cycle(), 'demoted self because DCS is not accessible and i was a leader')

    @patch('time.sleep', Mock())
    def test_bootstrap_from_another_member(self):
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.assertEqual(self.ha.bootstrap(), 'trying to bootstrap from replica \'other\'')

    def test_bootstrap_waiting_for_leader(self):
        self.ha.cluster = get_cluster_initialized_without_leader()
        self.assertEqual(self.ha.bootstrap(), 'waiting for leader to bootstrap')

    def test_bootstrap_without_leader(self):
        self.ha.cluster = get_cluster_initialized_without_leader()
        self.p.can_create_replica_without_replication_connection = MagicMock(return_value=True)
        self.assertEqual(self.ha.bootstrap(), 'trying to bootstrap (without leader)')

    def test_bootstrap_initialize_lock_failed(self):
        self.ha.cluster = get_cluster_not_initialized_without_leader()
        self.assertEqual(self.ha.bootstrap(), 'failed to acquire initialize lock')

    def test_bootstrap_initialized_new_cluster(self):
        self.ha.cluster = get_cluster_not_initialized_without_leader()
        self.e.initialize = true
        self.assertEqual(self.ha.bootstrap(), 'trying to bootstrap a new cluster')
        self.p.is_leader = false
        self.assertEqual(self.ha.run_cycle(), 'waiting for end of recovery after bootstrap')
        self.p.is_leader = true
        self.assertEqual(self.ha.run_cycle(), 'running post_bootstrap')
        self.assertEqual(self.ha.run_cycle(), 'initialized a new cluster')

    def test_bootstrap_release_initialize_key_on_failure(self):
        self.ha.cluster = get_cluster_not_initialized_without_leader()
        self.e.initialize = true
        self.ha.bootstrap()
        self.p.is_running = false
        self.assertRaises(PatroniException, self.ha.post_bootstrap)

    def test_bootstrap_release_initialize_key_on_watchdog_failure(self):
        self.ha.cluster = get_cluster_not_initialized_without_leader()
        self.e.initialize = true
        self.ha.bootstrap()
        self.p.is_running.return_value = MockPostmaster()
        self.p.is_leader = true
        with patch.object(Watchdog, 'activate', Mock(return_value=False)):
            self.assertEqual(self.ha.post_bootstrap(), 'running post_bootstrap')
            self.assertRaises(PatroniException, self.ha.post_bootstrap)

    @patch('psycopg2.connect', psycopg2_connect)
    def test_reinitialize(self):
        self.assertIsNotNone(self.ha.reinitialize())

        self.ha.cluster = get_cluster_initialized_with_leader()
        self.assertIsNone(self.ha.reinitialize(True))

        self.ha._async_executor.schedule('reinitialize')
        self.assertIsNotNone(self.ha.reinitialize())

        self.ha.state_handler.name = self.ha.cluster.leader.name
        self.assertIsNotNone(self.ha.reinitialize())

    @patch('time.sleep', Mock())
    def test_restart(self):
        self.assertEqual(self.ha.restart({}), (True, 'restarted successfully'))
        self.p.restart = 
Mock(return_value=None) self.assertEqual(self.ha.restart({}), (False, 'postgres is still starting')) self.p.restart = false self.assertEqual(self.ha.restart({}), (False, 'restart failed')) self.ha.cluster = get_cluster_initialized_with_leader() self.ha._async_executor.schedule('reinitialize') self.assertEqual(self.ha.restart({}), (False, 'reinitialize already in progress')) with patch.object(self.ha, "restart_matches", return_value=False): self.assertEqual(self.ha.restart({'foo': 'bar'}), (False, "restart conditions are not satisfied")) @patch('os.kill', Mock()) def test_restart_in_progress(self): with patch('patroni.async_executor.AsyncExecutor.busy', PropertyMock(return_value=True)): self.ha._async_executor.schedule('restart') self.assertTrue(self.ha.restart_scheduled()) self.assertEqual(self.ha.run_cycle(), 'restart in progress') self.ha.cluster = get_cluster_initialized_with_leader() self.assertEqual(self.ha.run_cycle(), 'restart in progress') self.ha.has_lock = true self.assertEqual(self.ha.run_cycle(), 'updated leader lock during restart') self.ha.update_lock = false self.p.set_role('master') with patch('patroni.async_executor.CriticalTask.cancel', Mock(return_value=False)): with patch('patroni.postgresql.Postgresql.terminate_starting_postmaster') as mock_terminate: self.assertEqual(self.ha.run_cycle(), 'lost leader lock during restart') mock_terminate.assert_called() def test_manual_failover_from_leader(self): self.ha.fetch_node_status = get_node_status() self.ha.has_lock = true self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', '', None)) self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock') self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, '', self.p.name, None)) self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock') self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, '', 'blabla', None)) self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock') f = Failover(0, self.p.name, '', None) self.ha.cluster = get_cluster_initialized_with_leader(f) self.assertEqual(self.ha.run_cycle(), 'manual failover: demoting myself') self.ha._rewind.rewind_or_reinitialize_needed_and_possible = true self.assertEqual(self.ha.run_cycle(), 'manual failover: demoting myself') self.ha.fetch_node_status = get_node_status(nofailover=True) self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock') self.ha.fetch_node_status = get_node_status(watchdog_failed=True) self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock') self.ha.fetch_node_status = get_node_status(timeline=1) self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock') self.ha.fetch_node_status = get_node_status(wal_position=1) self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock') # manual failover from the previous leader to us won't happen if we hold the nofailover flag self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, None)) self.assertEqual(self.ha.run_cycle(), 'no action. 
i am the leader with the lock') # Failover scheduled time must include timezone scheduled = datetime.datetime.now() self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled)) self.ha.run_cycle() scheduled = datetime.datetime.utcnow().replace(tzinfo=tzutc) self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled)) self.assertEqual('no action. i am the leader with the lock', self.ha.run_cycle()) scheduled = scheduled + datetime.timedelta(seconds=30) self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled)) self.assertEqual('no action. i am the leader with the lock', self.ha.run_cycle()) scheduled = scheduled + datetime.timedelta(seconds=-600) self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled)) self.assertEqual('no action. i am the leader with the lock', self.ha.run_cycle()) scheduled = None self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled)) self.assertEqual('no action. i am the leader with the lock', self.ha.run_cycle()) def test_manual_failover_from_leader_in_pause(self): self.ha.has_lock = true self.ha.is_paused = true scheduled = datetime.datetime.now() self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled)) self.assertEqual('PAUSE: no action. i am the leader with the lock', self.ha.run_cycle()) self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, self.p.name, '', None)) self.assertEqual('PAUSE: no action. i am the leader with the lock', self.ha.run_cycle()) def test_manual_failover_from_leader_in_synchronous_mode(self): self.p.is_leader = true self.ha.has_lock = true self.ha.is_synchronous_mode = true self.ha.is_failover_possible = false self.ha.process_sync_replication = Mock() self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, self.p.name, 'a', None), (self.p.name, None)) self.assertEqual('no action. i am the leader with the lock', self.ha.run_cycle()) self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, self.p.name, 'a', None), (self.p.name, 'a')) self.ha.is_failover_possible = true self.assertEqual('manual failover: demoting myself', self.ha.run_cycle()) def test_manual_failover_process_no_leader(self): self.p.is_leader = false self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', self.p.name, None)) self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'leader', None)) self.p.set_role('replica') self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') self.ha.fetch_node_status = get_node_status() # accessible, in_recovery self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node') self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, self.p.name, '', None)) self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node') self.ha.fetch_node_status = get_node_status(reachable=False) # inaccessible, in_recovery self.p.set_role('replica') self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') # set failover flag to True for all members of the cluster # this should elect the current member, as we are not going to call the API for it. 
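        # (get_node_status(nofailover=True) below only changes what the API mock reports for the other members)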
        self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'other', None))
        self.ha.fetch_node_status = get_node_status(nofailover=True)  # accessible, in_recovery
        self.p.set_role('replica')
        self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
        # same as previous, but set the current member to nofailover. In no case should it be elected leader
        self.ha.patroni.nofailover = True
        self.assertEqual(self.ha.run_cycle(), 'following a different leader because I am not allowed to promote')

    def test_manual_failover_process_no_leader_in_pause(self):
        self.ha.is_paused = true
        self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'other', None))
        self.assertEqual(self.ha.run_cycle(), 'PAUSE: continue to run as master without lock')
        self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, 'leader', '', None))
        self.assertEqual(self.ha.run_cycle(), 'PAUSE: continue to run as master without lock')
        self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, 'leader', 'blabla', None))
        self.assertEqual('PAUSE: acquired session lock as a leader', self.ha.run_cycle())
        self.p.is_leader = false
        self.p.set_role('replica')
        self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, 'leader', self.p.name, None))
        self.assertEqual(self.ha.run_cycle(), 'PAUSE: promoted self to leader by acquiring session lock')

    def test_is_healthiest_node(self):
        self.ha.state_handler.is_leader = false
        self.ha.patroni.nofailover = False
        self.ha.fetch_node_status = get_node_status()
        self.assertTrue(self.ha.is_healthiest_node())
        with patch.object(Watchdog, 'is_healthy', PropertyMock(return_value=False)):
            self.assertFalse(self.ha.is_healthiest_node())
        with patch('patroni.postgresql.Postgresql.is_starting', return_value=True):
            self.assertFalse(self.ha.is_healthiest_node())
        self.ha.is_paused = true
        self.assertFalse(self.ha.is_healthiest_node())

    def test__is_healthiest_node(self):
        self.ha.cluster = get_cluster_initialized_without_leader(sync=('postgresql1', self.p.name))
        self.assertTrue(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.p.is_leader = false
        self.ha.fetch_node_status = get_node_status()  # accessible, in_recovery
        self.assertTrue(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.ha.fetch_node_status = get_node_status(in_recovery=False)  # accessible, not in_recovery
        self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.ha.fetch_node_status = get_node_status(wal_position=11)  # accessible, in_recovery, wal position ahead
        self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        # in synchronous_mode consider itself healthy if the former leader is accessible in read-only and ahead of us
        with patch.object(Ha, 'is_synchronous_mode', Mock(return_value=True)):
            self.assertTrue(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        with patch('patroni.postgresql.Postgresql.timeline_wal_position', return_value=(1, 1, 1)):
            self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        with patch('patroni.postgresql.Postgresql.replica_cached_timeline', return_value=1):
            self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.ha.patroni.nofailover = True
        self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.ha.patroni.nofailover = False

    def test_fetch_node_status(self):
        member = Member(0, 'test', 1, {'api_url': 'http://127.0.0.1:8011/patroni'})
        self.ha.fetch_node_status(member)
        member = Member(0, 'test', 1, {'api_url': 'http://localhost:8011/patroni'})
        self.ha.fetch_node_status(member)

    def test_post_recover(self):
        self.p.is_running = false
        self.ha.has_lock = true
        self.assertEqual(self.ha.post_recover(), 'removed leader key after trying and failing to start postgres')
        self.ha.has_lock = false
        self.assertEqual(self.ha.post_recover(), 'failed to start postgres')
        self.p.is_running = true
        self.assertIsNone(self.ha.post_recover())

    def test_schedule_future_restart(self):
        self.ha.patroni.scheduled_restart = {}
        # do the restart 2 times. The first one should succeed, the second one should fail
        self.assertTrue(self.ha.schedule_future_restart({'schedule': future_restart_time}))
        self.assertFalse(self.ha.schedule_future_restart({'schedule': future_restart_time}))

    def test_delete_future_restarts(self):
        self.ha.delete_future_restart()

    def test_evaluate_scheduled_restart(self):
        self.p.postmaster_start_time = Mock(return_value=str(postmaster_start_time))
        # restart already in progress
        with patch('patroni.async_executor.AsyncExecutor.busy', PropertyMock(return_value=True)):
            self.assertIsNone(self.ha.evaluate_scheduled_restart())
        # restart when the postmaster has already been restarted, fails
        with patch.object(self.ha, 'future_restart_scheduled',
                          Mock(return_value={'postmaster_start_time':
                                             str(postmaster_start_time - datetime.timedelta(days=1)),
                                             'schedule': str(future_restart_time)})):
            self.assertIsNone(self.ha.evaluate_scheduled_restart())
        with patch.object(self.ha, 'future_restart_scheduled',
                          Mock(return_value={'postmaster_start_time': str(postmaster_start_time),
                                             'schedule': str(future_restart_time)})):
            with patch.object(self.ha, 'should_run_scheduled_action', Mock(return_value=True)):
                # restart in the future, ok
                self.assertIsNotNone(self.ha.evaluate_scheduled_restart())
                with patch.object(self.ha, 'restart', Mock(return_value=(False, "Test"))):
                    # restart in the future, but the actual restart failed
                    self.assertIsNone(self.ha.evaluate_scheduled_restart())

    def test_scheduled_restart(self):
        self.ha.cluster = get_cluster_initialized_with_leader()
        with patch.object(self.ha, "evaluate_scheduled_restart", Mock(return_value="restart scheduled")):
            self.assertEqual(self.ha.run_cycle(), "restart scheduled")

    def test_restart_matches(self):
        self.p._role = 'replica'
        self.p._connection.server_version = 90500
        self.p._pending_restart = True
        self.assertFalse(self.ha.restart_matches("master", "9.5.0", True))
        self.assertFalse(self.ha.restart_matches("replica", "9.4.3", True))
        self.p._pending_restart = False
        self.assertFalse(self.ha.restart_matches("replica", "9.5.2", True))
        self.assertTrue(self.ha.restart_matches("replica", "9.5.2", False))

    def test_process_healthy_cluster_in_pause(self):
        self.p.is_leader = false
        self.ha.is_paused = true
        self.p.name = 'leader'
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.assertEqual(self.ha.run_cycle(), 'PAUSE: removed leader lock because postgres is not running as master')
        self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, '', self.p.name, None))
        self.assertEqual(self.ha.run_cycle(), 'PAUSE: waiting to become master after promote...')

    def test_process_healthy_standby_cluster_as_standby_leader(self):
        self.p.is_leader = false
        self.p.name = 'leader'
        self.ha.cluster = get_standby_cluster_initialized_with_only_leader()
        self.p.config.check_recovery_conf = Mock(return_value=(False, False))
        self.assertEqual(self.ha.run_cycle(), 'promoted self to a standby leader because i had the session lock')
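        # a second cycle with the lock still held should be a no-op for the standby leader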
self.assertEqual(self.ha.run_cycle(), 'no action. i am the standby leader with the lock') def test_process_healthy_standby_cluster_as_cascade_replica(self): self.p.is_leader = false self.p.name = 'replica' self.ha.cluster = get_standby_cluster_initialized_with_only_leader() self.assertEqual(self.ha.run_cycle(), 'no action. i am a secondary and i am following a standby leader') with patch.object(Leader, 'conn_url', PropertyMock(return_value='')): self.assertEqual(self.ha.run_cycle(), 'continue following the old known standby leader') def test_process_unhealthy_standby_cluster_as_standby_leader(self): self.p.is_leader = false self.p.name = 'leader' self.ha.cluster = get_standby_cluster_initialized_with_only_leader() self.ha.cluster.is_unlocked = true self.ha.sysid_valid = true self.p._sysid = True self.assertEqual(self.ha.run_cycle(), 'promoted self to a standby leader by acquiring session lock') @patch.object(Rewind, 'rewind_or_reinitialize_needed_and_possible', Mock(return_value=True)) @patch.object(Rewind, 'can_rewind', PropertyMock(return_value=True)) def test_process_unhealthy_standby_cluster_as_cascade_replica(self): self.p.is_leader = false self.p.name = 'replica' self.ha.cluster = get_standby_cluster_initialized_with_only_leader() self.ha.is_unlocked = true self.assertTrue(self.ha.run_cycle().startswith('running pg_rewind from remote_master:')) def test_recover_unhealthy_leader_in_standby_cluster(self): self.p.is_leader = false self.p.name = 'leader' self.p.is_running = false self.p.follow = false self.ha.cluster = get_standby_cluster_initialized_with_only_leader() self.assertEqual(self.ha.run_cycle(), 'starting as a standby leader because i had the session lock') def test_recover_unhealthy_unlocked_standby_cluster(self): self.p.is_leader = false self.p.name = 'leader' self.p.is_running = false self.p.follow = false self.ha.cluster = get_standby_cluster_initialized_with_only_leader() self.ha.cluster.is_unlocked = true self.ha.has_lock = false self.assertEqual(self.ha.run_cycle(), 'trying to follow a remote master because standby cluster is unhealthy') def test_failed_to_update_lock_in_pause(self): self.ha.update_lock = false self.ha.is_paused = true self.p.name = 'leader' self.ha.cluster = get_cluster_initialized_with_leader() self.assertEqual(self.ha.run_cycle(), 'PAUSE: continue to run as master after failing to update leader lock in DCS') def test_postgres_unhealthy_in_pause(self): self.ha.is_paused = true self.p.is_healthy = false self.assertEqual(self.ha.run_cycle(), 'PAUSE: postgres is not running') self.ha.has_lock = true self.assertEqual(self.ha.run_cycle(), 'PAUSE: removed leader lock because postgres is not running') def test_no_etcd_connection_in_pause(self): self.ha.is_paused = true self.ha.load_cluster_from_dcs = Mock(side_effect=DCSError('Etcd is not responding properly')) self.assertEqual(self.ha.run_cycle(), 'PAUSE: DCS is not accessible') @patch('patroni.ha.Ha.update_lock', return_value=True) @patch('patroni.ha.Ha.demote') def test_starting_timeout(self, demote, update_lock): def check_calls(seq): for mock, called in seq: if called: mock.assert_called_once() else: mock.assert_not_called() mock.reset_mock() self.ha.has_lock = true self.ha.cluster = get_cluster_initialized_with_leader() self.p.check_for_startup = true self.p.time_in_state = lambda: 30 self.assertEqual(self.ha.run_cycle(), 'PostgreSQL is still starting up, 270 seconds until timeout') check_calls([(update_lock, True), (demote, False)]) self.p.time_in_state = lambda: 350 self.ha.fetch_node_status = 
get_node_status(reachable=False) # inaccessible, in_recovery self.assertEqual(self.ha.run_cycle(), 'master start has timed out, but continuing to wait because failover is not possible') check_calls([(update_lock, True), (demote, False)]) self.ha.fetch_node_status = get_node_status() # accessible, in_recovery self.assertEqual(self.ha.run_cycle(), 'stopped PostgreSQL because of startup timeout') check_calls([(update_lock, True), (demote, True)]) update_lock.return_value = False self.assertEqual(self.ha.run_cycle(), 'stopped PostgreSQL while starting up because leader key was lost') check_calls([(update_lock, True), (demote, True)]) self.ha.has_lock = false self.p.is_leader = false self.assertEqual(self.ha.run_cycle(), 'no action. i am a secondary and i am following a leader') check_calls([(update_lock, False), (demote, False)]) def test_manual_failover_while_starting(self): self.ha.has_lock = true self.p.check_for_startup = true f = Failover(0, self.p.name, '', None) self.ha.cluster = get_cluster_initialized_with_leader(f) self.ha.fetch_node_status = get_node_status() # accessible, in_recovery self.assertEqual(self.ha.run_cycle(), 'manual failover: demoting myself') @patch('patroni.ha.Ha.demote') def test_failover_immediately_on_zero_master_start_timeout(self, demote): self.p.is_running = false self.ha.cluster = get_cluster_initialized_with_leader(sync=(self.p.name, 'other')) self.ha.cluster.config.data['synchronous_mode'] = True self.ha.patroni.config.set_dynamic_configuration({'master_start_timeout': 0}) self.ha.has_lock = true self.ha.update_lock = true self.ha.fetch_node_status = get_node_status() # accessible, in_recovery self.assertEqual(self.ha.run_cycle(), 'stopped PostgreSQL to fail over after a crash') demote.assert_called_once() @patch('patroni.postgresql.Postgresql.follow') def test_demote_immediate(self, follow): self.ha.has_lock = true self.e.get_cluster = Mock(return_value=get_cluster_initialized_without_leader()) self.ha.demote('immediate') follow.assert_called_once_with(None) def test_process_sync_replication(self): self.ha.has_lock = true mock_set_sync = self.p.config.set_synchronous_standby = Mock() self.p.name = 'leader' # Test sync key removed when sync mode disabled self.ha.cluster = get_cluster_initialized_with_leader(sync=('leader', 'other')) with patch.object(self.ha.dcs, 'delete_sync_state') as mock_delete_sync: self.ha.run_cycle() mock_delete_sync.assert_called_once() mock_set_sync.assert_called_once_with(None) mock_set_sync.reset_mock() # Test sync key not touched when not there self.ha.cluster = get_cluster_initialized_with_leader() with patch.object(self.ha.dcs, 'delete_sync_state') as mock_delete_sync: self.ha.run_cycle() mock_delete_sync.assert_not_called() mock_set_sync.assert_called_once_with(None) mock_set_sync.reset_mock() self.ha.is_synchronous_mode = true # Test sync standby not touched when picking the same node self.p.pick_synchronous_standby = Mock(return_value=('other', True)) self.ha.cluster = get_cluster_initialized_with_leader(sync=('leader', 'other')) self.ha.run_cycle() mock_set_sync.assert_not_called() mock_set_sync.reset_mock() # Test sync standby is replaced when switching standbys self.p.pick_synchronous_standby = Mock(return_value=('other2', False)) self.ha.dcs.write_sync_state = Mock(return_value=True) self.ha.run_cycle() mock_set_sync.assert_called_once_with('other2') mock_set_sync.reset_mock() # Test sync standby is not disabled when updating dcs fails self.ha.dcs.write_sync_state = Mock(return_value=False) self.ha.run_cycle() 
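        # write_sync_state failed, so synchronous_standby_names must stay untouched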
mock_set_sync.assert_not_called() mock_set_sync.reset_mock() # Test changing sync standby self.ha.dcs.write_sync_state = Mock(return_value=True) self.ha.dcs.get_cluster = Mock(return_value=get_cluster_initialized_with_leader(sync=('leader', 'other'))) # self.ha.cluster = get_cluster_initialized_with_leader(sync=('leader', 'other')) self.p.pick_synchronous_standby = Mock(return_value=('other2', True)) self.ha.run_cycle() self.ha.dcs.get_cluster.assert_called_once() self.assertEqual(self.ha.dcs.write_sync_state.call_count, 2) # Test updating sync standby key failed due to race self.ha.dcs.write_sync_state = Mock(side_effect=[True, False]) self.ha.run_cycle() self.assertEqual(self.ha.dcs.write_sync_state.call_count, 2) # Test updating sync standby key failed due to DCS being not accessible self.ha.dcs.write_sync_state = Mock(return_value=True) self.ha.dcs.get_cluster = Mock(side_effect=DCSError('foo')) self.ha.run_cycle() # Test changing sync standby failed due to race self.ha.dcs.get_cluster = Mock(return_value=get_cluster_initialized_with_leader(sync=('somebodyelse', None))) self.ha.run_cycle() self.assertEqual(self.ha.dcs.write_sync_state.call_count, 2) # Test sync set to '*' when synchronous_mode_strict is enabled mock_set_sync.reset_mock() self.ha.is_synchronous_mode_strict = true self.p.pick_synchronous_standby = Mock(return_value=(None, False)) self.ha.run_cycle() mock_set_sync.assert_called_once_with('*') def test_sync_replication_become_master(self): self.ha.is_synchronous_mode = true mock_set_sync = self.p.config.set_synchronous_standby = Mock() self.p.is_leader = false self.p.set_role('replica') self.ha.has_lock = true mock_write_sync = self.ha.dcs.write_sync_state = Mock(return_value=True) self.p.name = 'leader' self.ha.cluster = get_cluster_initialized_with_leader(sync=('other', None)) # When we just became master nobody is sync self.assertEqual(self.ha.enforce_master_role('msg', 'promote msg'), 'promote msg') mock_set_sync.assert_called_once_with(None) mock_write_sync.assert_called_once_with('leader', None, index=0) mock_set_sync.reset_mock() # When we just became master nobody is sync self.p.set_role('replica') mock_write_sync.return_value = False self.assertTrue(self.ha.enforce_master_role('msg', 'promote msg') != 'promote msg') mock_set_sync.assert_not_called() def test_unhealthy_sync_mode(self): self.ha.is_synchronous_mode = true self.p.is_leader = false self.p.set_role('replica') self.p.name = 'other' self.ha.cluster = get_cluster_initialized_without_leader(sync=('leader', 'other2')) mock_write_sync = self.ha.dcs.write_sync_state = Mock(return_value=True) mock_acquire = self.ha.acquire_lock = Mock(return_value=True) mock_follow = self.p.follow = Mock() mock_promote = self.p.promote = Mock() # If we don't match the sync replica we are not allowed to acquire lock self.ha.run_cycle() mock_acquire.assert_not_called() mock_follow.assert_called_once() self.assertEqual(mock_follow.call_args[0][0], None) mock_write_sync.assert_not_called() mock_follow.reset_mock() # If we do match we will try to promote self.ha._is_healthiest_node = true self.ha.cluster = get_cluster_initialized_without_leader(sync=('leader', 'other')) self.ha.run_cycle() mock_acquire.assert_called_once() mock_follow.assert_not_called() mock_promote.assert_called_once() mock_write_sync.assert_called_once_with('other', None, index=0) def test_disable_sync_when_restarting(self): self.ha.is_synchronous_mode = true self.p.name = 'other' self.p.is_leader = false self.p.set_role('replica') mock_restart = self.p.restart = 
Mock(return_value=True)
        self.ha.cluster = get_cluster_initialized_with_leader(sync=('leader', 'other'))
        self.ha.touch_member = Mock(return_value=True)
        self.ha.dcs.get_cluster = Mock(side_effect=[
            get_cluster_initialized_with_leader(sync=('leader', syncstandby))
            for syncstandby in ['other', None]])
        with patch('time.sleep') as mock_sleep:
            self.ha.restart({})
            mock_restart.assert_called_once()
            mock_sleep.assert_called()
        # Restart is still called when DCS connection fails
        mock_restart.reset_mock()
        self.ha.dcs.get_cluster = Mock(side_effect=DCSError("foo"))
        self.ha.restart({})
        mock_restart.assert_called_once()
        # We don't try to fetch the cluster state when touch_member fails
        mock_restart.reset_mock()
        self.ha.dcs.get_cluster.reset_mock()
        self.ha.touch_member = Mock(return_value=False)
        self.ha.restart({})
        mock_restart.assert_called_once()
        self.ha.dcs.get_cluster.assert_not_called()

    def test_effective_tags(self):
        self.ha._disable_sync = True
        self.assertEqual(self.ha.get_effective_tags(), {'foo': 'bar', 'nosync': True})
        self.ha._disable_sync = False
        self.assertEqual(self.ha.get_effective_tags(), {'foo': 'bar'})

    def test_restore_cluster_config(self):
        self.ha.cluster.config.data.clear()
        self.ha.has_lock = true
        self.ha.cluster.is_unlocked = false
        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')

    def test_watch(self):
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.ha.watch(0)

    def test_wakeup(self):
        self.ha.wakeup()

    def test_shutdown(self):
        self.p.is_running = false
        self.ha.has_lock = true
        self.ha.shutdown()

    @patch('time.sleep', Mock())
    def test_leader_with_empty_directory(self):
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.ha.has_lock = true
        self.p.data_directory_empty = true
        self.assertEqual(self.ha.run_cycle(), 'released leader key voluntarily as data dir empty and currently leader')
        self.assertEqual(self.p.role, 'uninitialized')
        # as has_lock is mocked out, we need to fake the leader key release
        self.ha.has_lock = false
        # will not say bootstrap from leader as replica can't self-elect
        self.assertEqual(self.ha.run_cycle(), "trying to bootstrap from replica 'other'")

    @patch('psycopg2.connect', psycopg2_connect)
    def test_update_cluster_history(self):
        self.ha.has_lock = true
        self.ha.cluster.is_unlocked = false
        for tl in (1, 3):
            self.p.get_master_timeline = Mock(return_value=tl)
            self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')

    @patch('sys.exit', return_value=1)
    def test_abort_join(self, exit_mock):
        self.ha.cluster = get_cluster_not_initialized_without_leader()
        self.p.is_leader = false
        self.ha.run_cycle()
        exit_mock.assert_called_once_with(1)

    def test_after_pause(self):
        self.ha.has_lock = true
        self.ha.cluster.is_unlocked = false
        self.ha.is_paused = true
        self.assertEqual(self.ha.run_cycle(), 'PAUSE: no action. i am the leader with the lock')
        self.ha.is_paused = false
        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')

    @patch('psycopg2.connect', psycopg2_connect)
    def test_permanent_logical_slots_after_promote(self):
        config = ClusterConfig(1, {'slots': {'l': {'database': 'postgres', 'plugin': 'test_decoding'}}}, 1)
        self.ha.cluster = get_cluster_initialized_without_leader(cluster_config=config)
        self.assertEqual(self.ha.run_cycle(), 'acquired session lock as a leader')
        self.ha.cluster = get_cluster_initialized_without_leader(leader=True, cluster_config=config)
        self.ha.has_lock = true
        self.assertEqual(self.ha.run_cycle(), 'no action. 
i am the leader with the lock') patroni-1.6.4/tests/test_kubernetes.py000066400000000000000000000210111361356115100201150ustar00rootroot00000000000000import json import time import unittest from mock import Mock, patch from patroni.dcs.kubernetes import Kubernetes, KubernetesError, k8s_client, RetryFailedError from threading import Thread from . import SleepException def mock_list_namespaced_config_map(self, *args, **kwargs): metadata = {'resource_version': '1', 'labels': {'f': 'b'}, 'name': 'test-config', 'annotations': {'initialize': '123', 'config': '{}'}} items = [k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata))] metadata.update({'name': 'test-leader', 'annotations': {'optime': '1234', 'leader': 'p-0', 'ttl': '30s'}}) items.append(k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata))) metadata.update({'name': 'test-failover', 'annotations': {'leader': 'p-0'}}) items.append(k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata))) metadata.update({'name': 'test-sync', 'annotations': {'leader': 'p-0'}}) items.append(k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata))) metadata = k8s_client.V1ObjectMeta(resource_version='1') return k8s_client.V1ConfigMapList(metadata=metadata, items=items, kind='ConfigMapList') def mock_list_namespaced_pod(self, *args, **kwargs): metadata = k8s_client.V1ObjectMeta(resource_version='1', name='p-0', annotations={'status': '{}'}) items = [k8s_client.V1Pod(metadata=metadata)] return k8s_client.V1PodList(items=items, kind='PodList') def mock_config_map(*args, **kwargs): mock = Mock() mock.metadata.resource_version = '2' return mock @patch.object(k8s_client.CoreV1Api, 'patch_namespaced_config_map', mock_config_map) @patch.object(k8s_client.CoreV1Api, 'create_namespaced_config_map', mock_config_map) @patch('kubernetes.client.api_client.ThreadPool', Mock(), create=True) @patch.object(Thread, 'start', Mock()) class TestKubernetes(unittest.TestCase): @patch('kubernetes.config.load_kube_config', Mock()) @patch.object(k8s_client.CoreV1Api, 'list_namespaced_config_map', mock_list_namespaced_config_map) @patch.object(k8s_client.CoreV1Api, 'list_namespaced_pod', mock_list_namespaced_pod) @patch('kubernetes.client.api_client.ThreadPool', Mock(), create=True) @patch.object(Thread, 'start', Mock()) def setUp(self): self.k = Kubernetes({'ttl': 30, 'scope': 'test', 'name': 'p-0', 'loop_wait': 10, 'retry_timeout': 10, 'labels': {'f': 'b'}}) self.assertRaises(AttributeError, self.k._pods._build_cache) self.k._pods._is_ready = True self.assertRaises(AttributeError, self.k._kinds._build_cache) self.k._kinds._is_ready = True self.k.get_cluster() @patch('time.time', Mock(side_effect=[1, 10.9, 100])) def test__wait_caches(self): self.k._pods._is_ready = False with self.k._condition: self.assertRaises(RetryFailedError, self.k._wait_caches) def test_get_cluster(self): with patch.object(k8s_client.CoreV1Api, 'list_namespaced_config_map', mock_list_namespaced_config_map), \ patch.object(k8s_client.CoreV1Api, 'list_namespaced_pod', mock_list_namespaced_pod), \ patch('time.time', Mock(return_value=time.time() + 31)): self.k.get_cluster() with patch.object(Kubernetes, '_wait_caches', Mock(side_effect=Exception)): self.assertRaises(KubernetesError, self.k.get_cluster) @patch('kubernetes.config.load_kube_config', Mock()) @patch.object(k8s_client.CoreV1Api, 'create_namespaced_endpoints', Mock()) def test_update_leader(self): k = Kubernetes({'ttl': 30, 'scope': 'test', 'name': 'p-0', 'loop_wait': 10, 'retry_timeout': 10, 
'labels': {'f': 'b'}, 'use_endpoints': True, 'pod_ip': '10.0.0.0'}) self.assertIsNotNone(k.update_leader('123')) @patch('kubernetes.config.load_kube_config', Mock()) @patch.object(k8s_client.CoreV1Api, 'create_namespaced_endpoints', Mock()) def test_update_leader_with_restricted_access(self): k = Kubernetes({'ttl': 30, 'scope': 'test', 'name': 'p-0', 'loop_wait': 10, 'retry_timeout': 10, 'labels': {'f': 'b'}, 'use_endpoints': True, 'pod_ip': '10.0.0.0'}) self.assertIsNotNone(k.update_leader('123', True)) def test_take_leader(self): self.k.take_leader() self.k._leader_observed_record['leader'] = 'test' self.k.patch_or_create = Mock(return_value=False) self.k.take_leader() def test_manual_failover(self): with patch.object(k8s_client.CoreV1Api, 'patch_namespaced_config_map', Mock(side_effect=RetryFailedError(''))): self.k.manual_failover('foo', 'bar') def test_set_config_value(self): self.k.set_config_value('{}') @patch.object(k8s_client.CoreV1Api, 'patch_namespaced_pod', Mock(return_value=True)) def test_touch_member(self): self.k.touch_member({'role': 'replica'}) self.k._name = 'p-1' self.k.touch_member({'state': 'running', 'role': 'replica'}) self.k.touch_member({'state': 'stopped', 'role': 'master'}) def test_initialize(self): self.k.initialize() def test_delete_leader(self): self.k.delete_leader() def test_cancel_initialization(self): self.k.cancel_initialization() @patch.object(k8s_client.CoreV1Api, 'delete_collection_namespaced_config_map', Mock(side_effect=k8s_client.rest.ApiException(403, ''))) def test_delete_cluster(self): self.k.delete_cluster() @patch('kubernetes.config.load_kube_config', Mock()) @patch.object(k8s_client.CoreV1Api, 'create_namespaced_endpoints', Mock(side_effect=[k8s_client.rest.ApiException(502, ''), k8s_client.rest.ApiException(500, '')])) def test_delete_sync_state(self): k = Kubernetes({'ttl': 30, 'scope': 'test', 'name': 'p-0', 'loop_wait': 10, 'retry_timeout': 10, 'labels': {'f': 'b'}, 'use_endpoints': True, 'pod_ip': '10.0.0.0'}) self.assertFalse(k.delete_sync_state()) def test_watch(self): self.k.set_ttl(10) self.k.watch(None, 0) self.k.watch(None, 0) def test_set_history_value(self): self.k.set_history_value('{}') @patch('kubernetes.config.load_kube_config', Mock()) @patch('patroni.dcs.kubernetes.ObjectCache', Mock()) @patch.object(k8s_client.CoreV1Api, 'patch_namespaced_pod', Mock(return_value=True)) @patch.object(k8s_client.CoreV1Api, 'create_namespaced_endpoints', Mock()) @patch.object(k8s_client.CoreV1Api, 'create_namespaced_service', Mock(side_effect=[True, False, k8s_client.rest.ApiException(500, '')])) def test__create_config_service(self): k = Kubernetes({'ttl': 30, 'scope': 'test', 'name': 'p-0', 'loop_wait': 10, 'retry_timeout': 10, 'labels': {'f': 'b'}, 'use_endpoints': True, 'pod_ip': '10.0.0.0'}) self.assertIsNotNone(k.patch_or_create_config({'foo': 'bar'})) self.assertIsNotNone(k.patch_or_create_config({'foo': 'bar'})) k.touch_member({'state': 'running', 'role': 'replica'}) class TestCacheBuilder(unittest.TestCase): @patch('kubernetes.config.load_kube_config', Mock()) @patch('kubernetes.client.api_client.ThreadPool', Mock(), create=True) @patch.object(Thread, 'start', Mock()) def setUp(self): self.k = Kubernetes({'ttl': 30, 'scope': 'test', 'name': 'p-0', 'loop_wait': 10, 'retry_timeout': 10, 'labels': {'f': 'b'}}) @patch.object(k8s_client.CoreV1Api, 'list_namespaced_config_map', mock_list_namespaced_config_map) @patch('patroni.dcs.kubernetes.ObjectCache._watch') def test__build_cache(self, mock_response): 
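        # feed the watch stream a MODIFIED event, a DELETED event, an unknown event type and a 410 Gone error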
mock_response.return_value.read_chunked.return_value = [json.dumps( {'type': 'MODIFIED', 'object': {'metadata': { 'name': self.k.config_path, 'resourceVersion': '2', 'annotations': {self.k._CONFIG: 'foo'}}}} ).encode('utf-8'), ('\n' + json.dumps( {'type': 'DELETED', 'object': {'metadata': { 'name': self.k.config_path, 'resourceVersion': '3'}}} ) + '\n' + json.dumps( {'type': 'MDIFIED', 'object': {'metadata': {'name': self.k.config_path}}} ) + '\n' + json.dumps({'object': {'code': 410}}) + '\n').encode('utf-8')] self.k._kinds._build_cache() @patch('patroni.dcs.kubernetes.logger.error', Mock(side_effect=SleepException)) @patch('patroni.dcs.kubernetes.ObjectCache._build_cache', Mock(side_effect=Exception)) def test_run(self): self.assertRaises(SleepException, self.k._pods.run) patroni-1.6.4/tests/test_log.py000066400000000000000000000044351361356115100165420ustar00rootroot00000000000000import logging import os import sys import unittest import yaml from mock import Mock, patch from patroni.config import Config from patroni.log import PatroniLogger from six.moves.queue import Queue, Full _LOG = logging.getLogger(__name__) class TestPatroniLogger(unittest.TestCase): def setUp(self): self._handlers = logging.getLogger().handlers[:] def tearDown(self): logging.getLogger().handlers[:] = self._handlers @patch('logging.FileHandler._open', Mock()) def test_patroni_logger(self): config = { 'log': { 'traceback_level': 'DEBUG', 'max_queue_size': 5, 'dir': 'foo', 'file_size': 4096, 'file_num': 5, 'loggers': { 'foo.bar': 'INFO' } }, 'restapi': {}, 'postgresql': {'data_dir': 'foo'} } sys.argv = ['patroni.py'] os.environ[Config.PATRONI_CONFIG_VARIABLE] = yaml.dump(config, default_flow_style=False) logger = PatroniLogger() patroni_config = Config(None) logger.reload_config(patroni_config['log']) _LOG.exception('test') logger.start() with patch.object(logging.Handler, 'format', Mock(side_effect=Exception)): logging.error('test') self.assertEqual(logger.log_handler.maxBytes, config['log']['file_size']) self.assertEqual(logger.log_handler.backupCount, config['log']['file_num']) config['log']['level'] = 'DEBUG' config['log'].pop('dir') with patch('logging.Handler.close', Mock(side_effect=Exception)): logger.reload_config(config['log']) with patch.object(logging.Logger, 'makeRecord', Mock(side_effect=[logging.LogRecord('', logging.INFO, '', 0, '', (), None), Exception])): logging.exception('test') logging.error('test') with patch.object(Queue, 'put_nowait', Mock(side_effect=Full)): self.assertRaises(SystemExit, logger.shutdown) self.assertRaises(Exception, logger.shutdown) self.assertLessEqual(logger.queue_size, 2) # "Failed to close the old log handler" could be still in the queue self.assertEqual(logger.records_lost, 0) patroni-1.6.4/tests/test_patroni.py000066400000000000000000000164731361356115100174420ustar00rootroot00000000000000import etcd import logging import os import signal import time import unittest import patroni.config as config from mock import Mock, PropertyMock, patch from patroni.api import RestApiServer from patroni.async_executor import AsyncExecutor from patroni.dcs.etcd import Client from patroni.exceptions import DCSError from patroni.postgresql import Postgresql from patroni.postgresql.config import ConfigHandler from patroni import Patroni, main as _main, patroni_main, check_psycopg2 from six.moves import BaseHTTPServer, builtins from threading import Thread from . 
import psycopg2_connect, SleepException from .test_etcd import etcd_read, etcd_write from .test_postgresql import MockPostmaster class MockFrozenImporter(object): toc = set(['patroni.dcs.etcd']) @patch('time.sleep', Mock()) @patch('subprocess.call', Mock(return_value=0)) @patch('psycopg2.connect', psycopg2_connect) @patch.object(ConfigHandler, 'append_pg_hba', Mock()) @patch.object(ConfigHandler, 'write_postgresql_conf', Mock()) @patch.object(ConfigHandler, 'write_recovery_conf', Mock()) @patch.object(Postgresql, 'is_running', Mock(return_value=MockPostmaster())) @patch.object(Postgresql, 'call_nowait', Mock()) @patch.object(BaseHTTPServer.HTTPServer, '__init__', Mock()) @patch.object(AsyncExecutor, 'run', Mock()) @patch.object(etcd.Client, 'write', etcd_write) @patch.object(etcd.Client, 'read', etcd_read) class TestPatroni(unittest.TestCase): def test_no_config(self): self.assertRaises(SystemExit, patroni_main) @patch('pkgutil.get_importer', Mock(return_value=MockFrozenImporter())) @patch('sys.frozen', Mock(return_value=True), create=True) @patch.object(BaseHTTPServer.HTTPServer, '__init__', Mock()) @patch.object(etcd.Client, 'read', etcd_read) @patch.object(Thread, 'start', Mock()) @patch.object(Client, 'machines', PropertyMock(return_value=['http://remotehost:2379'])) def setUp(self): self._handlers = logging.getLogger().handlers[:] RestApiServer._BaseServer__is_shut_down = Mock() RestApiServer._BaseServer__shutdown_request = True RestApiServer.socket = 0 os.environ['PATRONI_POSTGRESQL_DATA_DIR'] = 'data/test0' conf = config.Config('postgres0.yml') self.p = Patroni(conf) def tearDown(self): logging.getLogger().handlers[:] = self._handlers @patch('patroni.dcs.AbstractDCS.get_cluster', Mock(side_effect=[None, DCSError('foo'), None])) def test_load_dynamic_configuration(self): self.p.config._dynamic_configuration = {} self.p.load_dynamic_configuration() self.p.load_dynamic_configuration() @patch('sys.argv', ['patroni.py', 'postgres0.yml']) @patch('time.sleep', Mock(side_effect=SleepException)) @patch.object(etcd.Client, 'delete', Mock()) @patch.object(Client, 'machines', PropertyMock(return_value=['http://remotehost:2379'])) @patch.object(Thread, 'join', Mock()) def test_patroni_patroni_main(self): with patch('subprocess.call', Mock(return_value=1)): with patch.object(Patroni, 'run', Mock(side_effect=SleepException)): os.environ['PATRONI_POSTGRESQL_DATA_DIR'] = 'data/test0' self.assertRaises(SleepException, patroni_main) with patch.object(Patroni, 'run', Mock(side_effect=KeyboardInterrupt())): with patch('patroni.ha.Ha.is_paused', Mock(return_value=True)): os.environ['PATRONI_POSTGRESQL_DATA_DIR'] = 'data/test0' patroni_main() @patch('os.getpid') @patch('multiprocessing.Process') @patch('patroni.patroni_main', Mock()) def test_patroni_main(self, mock_process, mock_getpid): mock_getpid.return_value = 2 _main() mock_getpid.return_value = 1 def mock_signal(signo, handler): handler(signo, None) with patch('signal.signal', mock_signal): with patch('os.waitpid', Mock(side_effect=[(1, 0), (0, 0)])): _main() with patch('os.waitpid', Mock(side_effect=OSError)): _main() ref = {'passtochild': lambda signo, stack_frame: 0} def mock_sighup(signo, handler): if hasattr(signal, 'SIGHUP') and signo == signal.SIGHUP: ref['passtochild'] = handler def mock_join(): ref['passtochild'](0, None) mock_process.return_value.join = mock_join with patch('signal.signal', mock_sighup), patch('os.kill', Mock()): self.assertIsNone(_main()) @patch('patroni.config.Config.save_cache', Mock()) 
@patch('patroni.config.Config.reload_local_configuration', Mock(return_value=True)) @patch('patroni.ha.Ha.is_leader', Mock(return_value=True)) @patch.object(Postgresql, 'state', PropertyMock(return_value='running')) @patch.object(Postgresql, 'data_directory_empty', Mock(return_value=False)) def test_run(self): self.p.postgresql.set_role('replica') self.p.sighup_handler() self.p.ha.dcs.watch = Mock(side_effect=SleepException) self.p.api.start = Mock() self.p.logger.start = Mock() self.p.config._dynamic_configuration = {} self.assertRaises(SleepException, self.p.run) with patch('patroni.config.Config.reload_local_configuration', Mock(return_value=False)): self.p.sighup_handler() self.assertRaises(SleepException, self.p.run) with patch('patroni.config.Config.set_dynamic_configuration', Mock(return_value=True)): self.assertRaises(SleepException, self.p.run) with patch('patroni.postgresql.Postgresql.data_directory_empty', Mock(return_value=False)): self.assertRaises(SleepException, self.p.run) def test_sigterm_handler(self): self.assertRaises(SystemExit, self.p.sigterm_handler) def test_schedule_next_run(self): self.p.ha.cluster = Mock() self.p.ha.dcs.watch = Mock(return_value=True) self.p.schedule_next_run() self.p.next_run = time.time() - self.p.dcs.loop_wait - 1 self.p.schedule_next_run() def test_noloadbalance(self): self.p.tags['noloadbalance'] = True self.assertTrue(self.p.noloadbalance) def test_nofailover(self): self.p.tags['nofailover'] = True self.assertTrue(self.p.nofailover) self.p.tags['nofailover'] = None self.assertFalse(self.p.nofailover) def test_replicatefrom(self): self.assertIsNone(self.p.replicatefrom) self.p.tags['replicatefrom'] = 'foo' self.assertEqual(self.p.replicatefrom, 'foo') def test_reload_config(self): self.p.reload_config() self.p.get_tags = Mock(side_effect=Exception) self.p.reload_config() def test_nosync(self): self.p.tags['nosync'] = True self.assertTrue(self.p.nosync) self.p.tags['nosync'] = None self.assertFalse(self.p.nosync) @patch.object(Thread, 'join', Mock()) def test_shutdown(self): self.p.api.shutdown = Mock(side_effect=Exception) self.p.ha.shutdown = Mock(side_effect=Exception) self.p.shutdown() def test_check_psycopg2(self): with patch.object(builtins, '__import__', Mock(side_effect=ImportError)): self.assertRaises(SystemExit, check_psycopg2) with patch('psycopg2.__version__', '2.5.3.dev1 a b c'): self.assertRaises(SystemExit, check_psycopg2) patroni-1.6.4/tests/test_postgresql.py000066400000000000000000001016051361356115100201610ustar00rootroot00000000000000import mock # for the mock.call method, importing it without a namespace breaks python3 import os import psycopg2 import re import subprocess import time from mock import Mock, MagicMock, PropertyMock, patch, mock_open from patroni.async_executor import CriticalTask from patroni.dcs import Cluster, ClusterConfig, Member, RemoteMember, SyncState from patroni.exceptions import PostgresConnectionException, PatroniException from patroni.postgresql import Postgresql, STATE_REJECT, STATE_NO_RESPONSE from patroni.postgresql.postmaster import PostmasterProcess from patroni.postgresql.slots import SlotsHandler from patroni.utils import RetryFailedError from six.moves import builtins from threading import Thread, current_thread from . 
import BaseTestPostgresql, MockCursor, MockPostmaster, psycopg2_connect mtime_ret = {} def mock_mtime(filename): if filename not in mtime_ret: mtime_ret[filename] = time.time() else: mtime_ret[filename] += 1 return mtime_ret[filename] def pg_controldata_string(*args, **kwargs): return b""" pg_control version number: 942 Catalog version number: 201509161 Database system identifier: 6200971513092291716 Database cluster state: shut down in recovery pg_control last modified: Fri Oct 2 10:57:06 2015 Latest checkpoint location: 0/30000C8 Prior checkpoint location: 0/2000060 Latest checkpoint's REDO location: 0/3000090 Latest checkpoint's REDO WAL file: 000000020000000000000003 Latest checkpoint's TimeLineID: 2 Latest checkpoint's PrevTimeLineID: 2 Latest checkpoint's full_page_writes: on Latest checkpoint's NextXID: 0/943 Latest checkpoint's NextOID: 24576 Latest checkpoint's NextMultiXactId: 1 Latest checkpoint's NextMultiOffset: 0 Latest checkpoint's oldestXID: 931 Latest checkpoint's oldestXID's DB: 1 Latest checkpoint's oldestActiveXID: 943 Latest checkpoint's oldestMultiXid: 1 Latest checkpoint's oldestMulti's DB: 1 Latest checkpoint's oldestCommitTs: 0 Latest checkpoint's newestCommitTs: 0 Time of latest checkpoint: Fri Oct 2 10:56:54 2015 Fake LSN counter for unlogged rels: 0/1 Minimum recovery ending location: 0/30241F8 Min recovery ending loc's timeline: 2 Backup start location: 0/0 Backup end location: 0/0 End-of-backup record required: no wal_level setting: hot_standby Current wal_log_hints setting: on Current max_connections setting: 100 Current max_worker_processes setting: 8 Current max_prepared_xacts setting: 0 Current max_locks_per_xact setting: 64 Current track_commit_timestamp setting: off Maximum data alignment: 8 Database block size: 8192 Blocks per segment of large relation: 131072 WAL block size: 8192 Bytes per WAL segment: 16777216 Maximum length of identifiers: 64 Maximum columns in an index: 32 Maximum size of a TOAST chunk: 1996 Size of a large-object chunk: 2048 Date/time type storage: 64-bit integers Float4 argument passing: by value Float8 argument passing: by value Data page checksum version: 0 """ @patch('subprocess.call', Mock(return_value=0)) @patch('psycopg2.connect', psycopg2_connect) class TestPostgresql(BaseTestPostgresql): @patch('subprocess.call', Mock(return_value=0)) @patch('os.rename', Mock()) @patch('patroni.postgresql.CallbackExecutor', Mock()) @patch.object(Postgresql, 'get_major_version', Mock(return_value=120000)) @patch.object(Postgresql, 'is_running', Mock(return_value=True)) def setUp(self): super(TestPostgresql, self).setUp() self.p.config.write_postgresql_conf() self.p._callback_executor = Mock() @patch('subprocess.Popen') @patch.object(Postgresql, 'wait_for_startup') @patch.object(Postgresql, 'wait_for_port_open') @patch.object(Postgresql, 'is_running') @patch.object(Postgresql, 'controldata', Mock()) def test_start(self, mock_is_running, mock_wait_for_port_open, mock_wait_for_startup, mock_popen): mock_is_running.return_value = MockPostmaster() mock_wait_for_port_open.return_value = True mock_wait_for_startup.return_value = False mock_popen.return_value.stdout.readline.return_value = '123' self.assertTrue(self.p.start()) mock_is_running.return_value = None mock_postmaster = MockPostmaster() with patch.object(PostmasterProcess, 'start', return_value=mock_postmaster): pg_conf = os.path.join(self.p.data_dir, 'postgresql.conf') open(pg_conf, 'w').close() self.assertFalse(self.p.start(task=CriticalTask())) with open(pg_conf) as f: lines = 
f.readlines() self.assertTrue("f.oo = 'bar'\n" in lines) mock_wait_for_startup.return_value = None self.assertFalse(self.p.start(10)) self.assertIsNone(self.p.start()) mock_wait_for_port_open.return_value = False self.assertFalse(self.p.start()) task = CriticalTask() task.cancel() self.assertFalse(self.p.start(task=task)) self.p.cancellable.cancel() self.assertFalse(self.p.start()) @patch.object(Postgresql, 'pg_isready') @patch('patroni.postgresql.polling_loop', Mock(return_value=range(1))) def test_wait_for_port_open(self, mock_pg_isready): mock_pg_isready.return_value = STATE_NO_RESPONSE mock_postmaster = MockPostmaster(is_running=False) # No pid file and postmaster death self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1)) mock_postmaster.is_running.return_value = True # timeout self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1)) # pg_isready failure mock_pg_isready.return_value = 'garbage' self.assertTrue(self.p.wait_for_port_open(mock_postmaster, 1)) # cancelled self.p.cancellable.cancel() self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1)) @patch('time.sleep', Mock()) @patch.object(Postgresql, 'is_running') @patch.object(Postgresql, '_wait_for_connection_close', Mock()) def test_stop(self, mock_is_running): # Postmaster is not running mock_callback = Mock() mock_is_running.return_value = None self.assertTrue(self.p.stop(on_safepoint=mock_callback)) mock_callback.assert_called() # Is running, stopped successfully mock_is_running.return_value = mock_postmaster = MockPostmaster() mock_callback.reset_mock() self.assertTrue(self.p.stop(on_safepoint=mock_callback)) mock_callback.assert_called() mock_postmaster.signal_stop.assert_called() # Stop signal failed mock_postmaster.signal_stop.return_value = False self.assertFalse(self.p.stop()) # Stop signal failed to find process mock_postmaster.signal_stop.return_value = True mock_callback.reset_mock() self.assertTrue(self.p.stop(on_safepoint=mock_callback)) mock_callback.assert_called() def test_restart(self): self.p.start = Mock(return_value=False) self.assertFalse(self.p.restart()) self.assertEqual(self.p.state, 'restart failed (restarting)') @patch('os.chmod', Mock()) @patch.object(builtins, 'open', MagicMock()) def test_write_pgpass(self): self.p.config.write_pgpass({'host': 'localhost', 'port': '5432', 'user': 'foo'}) self.p.config.write_pgpass({'host': 'localhost', 'port': '5432', 'user': 'foo', 'password': 'bar'}) def test_checkpoint(self): with patch.object(MockCursor, 'fetchone', Mock(return_value=(True, ))): self.assertEqual(self.p.checkpoint({'user': 'postgres'}), 'is_in_recovery=true') with patch.object(MockCursor, 'execute', Mock(return_value=None)): self.assertIsNone(self.p.checkpoint()) self.assertEqual(self.p.checkpoint(), 'not accessible or not healty') @patch('patroni.postgresql.config.mtime', mock_mtime) @patch('patroni.postgresql.config.ConfigHandler._get_pg_settings') def test_check_recovery_conf(self, mock_get_pg_settings): mock_get_pg_settings.return_value = { 'primary_conninfo': ['primary_conninfo', 'foo=', None, 'string', 'postmaster', self.p.config._auto_conf], 'recovery_min_apply_delay': ['recovery_min_apply_delay', '0', 'ms', 'integer', 'sighup', 'foo'] } self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) self.p.config.write_recovery_conf({'standby_mode': 'on'}) self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) mock_get_pg_settings.return_value['primary_conninfo'][1] = '' mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = 
'1' self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) mock_get_pg_settings.return_value['recovery_min_apply_delay'][5] = self.p.config._auto_conf self.assertEqual(self.p.config.check_recovery_conf(None), (True, False)) mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '0' self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) conninfo = {'host': '1', 'password': 'bar'} with patch('patroni.postgresql.config.ConfigHandler.primary_conninfo_params', Mock(return_value=conninfo)): mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '1' self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) mock_get_pg_settings.return_value['primary_conninfo'][1] = 'host=1 passfile='\ + re.sub(r'([\'\\ ])', r'\\\1', self.p.config._pgpass) mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '0' self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) self.p.config.write_recovery_conf({'standby_mode': 'on', 'primary_conninfo': conninfo.copy()}) self.p.config.write_postgresql_conf() self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) @patch.object(Postgresql, 'major_version', PropertyMock(return_value=120000)) @patch.object(Postgresql, 'is_running', MockPostmaster) @patch.object(MockPostmaster, 'create_time', Mock(return_value=1234567), create=True) @patch('patroni.postgresql.config.ConfigHandler._get_pg_settings') def test__read_recovery_params(self, mock_get_pg_settings): mock_get_pg_settings.return_value = {'primary_conninfo': ['primary_conninfo', '', None, 'string', 'postmaster', self.p.config._postgresql_conf]} self.p.config.write_recovery_conf({'standby_mode': 'on', 'primary_conninfo': {'password': 'foo'}}) self.p.config.write_postgresql_conf() self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) mock_get_pg_settings.side_effect = Exception with patch('patroni.postgresql.config.mtime', mock_mtime): self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) @patch.object(Postgresql, 'major_version', PropertyMock(return_value=100000)) def test__read_recovery_params_pre_v12(self): self.p.config.write_recovery_conf({'standby_mode': 'on', 'primary_conninfo': {'password': 'foo'}}) self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) self.p.config.write_recovery_conf({'standby_mode': '\n'}) with patch('patroni.postgresql.config.mtime', mock_mtime): self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) def test_write_postgresql_and_sanitize_auto_conf(self): read_data = 'primary_conninfo = foo\nfoo = bar\n' with open(os.path.join(self.p.data_dir, 'postgresql.auto.conf'), 'w') as f: f.write(read_data) mock_read_auto = mock_open(read_data=read_data) mock_read_auto.return_value.__iter__ = lambda o: iter(o.readline, '') with patch.object(builtins, 'open', Mock(side_effect=[mock_open()(), mock_read_auto(), IOError])),\ patch('os.chmod', Mock()): self.p.config.write_postgresql_conf() with patch.object(builtins, 'open', Mock(side_effect=[mock_open()(), IOError])), patch('os.chmod', Mock()): self.p.config.write_postgresql_conf() self.p.config.write_recovery_conf({'foo': 'bar'}) self.p.config.write_postgresql_conf() @patch.object(Postgresql, 'is_running', Mock(return_value=False)) @patch.object(Postgresql, 'start', Mock()) def test_follow(self): 
self.p.call_nowait('on_start') m = RemoteMember('1', {'restore_command': '2', 'primary_slot_name': 'foo', 'conn_kwargs': {'host': 'bar'}}) self.p.follow(m) @patch.object(Postgresql, 'is_running', Mock(return_value=True)) def test_sync_replication_slots(self): self.p.start() config = ClusterConfig(1, {'slots': {'ls': {'database': 'a', 'plugin': 'b'}, 'A': 0, 'test_3': 0, 'b': {'type': 'logical', 'plugin': '1'}}}, 1) cluster = Cluster(True, config, self.leader, 0, [self.me, self.other, self.leadermem], None, None, None) with mock.patch('patroni.postgresql.Postgresql._query', Mock(side_effect=psycopg2.OperationalError)): self.p.slots_handler.sync_replication_slots(cluster) self.p.slots_handler.sync_replication_slots(cluster) with mock.patch('patroni.postgresql.Postgresql.role', new_callable=PropertyMock(return_value='replica')): self.p.slots_handler.sync_replication_slots(cluster) with patch.object(SlotsHandler, 'drop_replication_slot', Mock(return_value=True)),\ patch('patroni.dcs.logger.error', new_callable=Mock()) as errorlog_mock: alias1 = Member(0, 'test-3', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5436/postgres'}) alias2 = Member(0, 'test.3', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5436/postgres'}) cluster.members.extend([alias1, alias2]) self.p.slots_handler.sync_replication_slots(cluster) self.assertEqual(errorlog_mock.call_count, 5) ca = errorlog_mock.call_args_list[0][0][1] self.assertTrue("test-3" in ca, "non matching {0}".format(ca)) self.assertTrue("test.3" in ca, "non matching {0}".format(ca)) @patch.object(MockCursor, 'execute', Mock(side_effect=psycopg2.OperationalError)) def test__query(self): self.assertRaises(PostgresConnectionException, self.p._query, 'blabla') self.p._state = 'restarting' self.assertRaises(RetryFailedError, self.p._query, 'blabla') def test_query(self): self.p.query('select 1') self.assertRaises(PostgresConnectionException, self.p.query, 'RetryFailedError') self.assertRaises(psycopg2.ProgrammingError, self.p.query, 'blabla') @patch.object(Postgresql, 'pg_isready', Mock(return_value=STATE_REJECT)) def test_is_leader(self): self.assertTrue(self.p.is_leader()) self.p.reset_cluster_info_state() with patch.object(Postgresql, '_query', Mock(side_effect=RetryFailedError(''))): self.assertRaises(PostgresConnectionException, self.p.is_leader) def test_reload(self): self.assertTrue(self.p.reload()) @patch.object(Postgresql, 'is_running') def test_is_healthy(self, mock_is_running): mock_is_running.return_value = True self.assertTrue(self.p.is_healthy()) mock_is_running.return_value = False self.assertFalse(self.p.is_healthy()) def test_promote(self): self.p.set_role('replica') self.assertIsNone(self.p.promote(0)) self.assertTrue(self.p.promote(0)) def test_timeline_wal_position(self): self.assertEqual(self.p.timeline_wal_position(), (1, 2, 1)) Thread(target=self.p.timeline_wal_position).start() @patch.object(PostmasterProcess, 'from_pidfile') def test_is_running(self, mock_frompidfile): # Cached postmaster running mock_postmaster = self.p._postmaster_proc = MockPostmaster() self.assertEqual(self.p.is_running(), mock_postmaster) # Cached postmaster not running, no postmaster running mock_postmaster.is_running.return_value = False mock_frompidfile.return_value = None self.assertEqual(self.p.is_running(), None) self.assertEqual(self.p._postmaster_proc, None) # No cached postmaster, postmaster running mock_frompidfile.return_value = mock_postmaster2 = MockPostmaster() self.assertEqual(self.p.is_running(), mock_postmaster2) 
self.assertEqual(self.p._postmaster_proc, mock_postmaster2) @patch('shlex.split', Mock(side_effect=OSError)) def test_call_nowait(self): self.p.set_role('replica') self.assertIsNone(self.p.call_nowait('on_start')) self.p.bootstrapping = True self.assertIsNone(self.p.call_nowait('on_start')) def test_non_existing_callback(self): self.assertFalse(self.p.call_nowait('foobar')) @patch.object(Postgresql, 'is_running', Mock(return_value=MockPostmaster())) def test_is_leader_exception(self): self.p.start() self.p.query = Mock(side_effect=psycopg2.OperationalError("not supported")) self.assertTrue(self.p.stop()) @patch('os.rename', Mock()) @patch('os.path.isdir', Mock(return_value=True)) def test_move_data_directory(self): self.p.move_data_directory() with patch('os.rename', Mock(side_effect=OSError)): self.p.move_data_directory() @patch('os.listdir', Mock(return_value=['recovery.conf'])) @patch('os.path.exists', Mock(return_value=True)) @patch.object(Postgresql, 'controldata', Mock()) def test_get_postgres_role_from_data_directory(self): self.assertEqual(self.p.get_postgres_role_from_data_directory(), 'replica') def test_remove_data_directory(self): def _symlink(src, dst): try: os.symlink(src, dst) except OSError: if os.name == 'nt': # os.symlink under Windows needs admin rights, so skip it pass os.makedirs(os.path.join(self.p.data_dir, 'foo')) _symlink('foo', os.path.join(self.p.data_dir, 'pg_wal')) self.p.remove_data_directory() open(self.p.data_dir, 'w').close() self.p.remove_data_directory() _symlink('unexisting', self.p.data_dir) with patch('os.unlink', Mock(side_effect=OSError)): self.p.remove_data_directory() self.p.remove_data_directory() @patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True)) def test_controldata(self): with patch('subprocess.check_output', Mock(return_value=0, side_effect=pg_controldata_string)): data = self.p.controldata() self.assertEqual(len(data), 50) self.assertEqual(data['Database cluster state'], 'shut down in recovery') self.assertEqual(data['wal_log_hints setting'], 'on') self.assertEqual(int(data['Database block size']), 8192) with patch('subprocess.check_output', Mock(side_effect=subprocess.CalledProcessError(1, ''))): self.assertEqual(self.p.controldata(), {}) @patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True)) @patch('subprocess.check_output', MagicMock(return_value=0, side_effect=pg_controldata_string)) def test_sysid(self): self.assertEqual(self.p.sysid, "6200971513092291716") @patch('os.path.isfile', Mock(return_value=True)) @patch('shutil.copy', Mock(side_effect=IOError)) def test_save_configuration_files(self): self.p.config.save_configuration_files() @patch('os.path.isfile', Mock(side_effect=[False, True])) @patch('shutil.copy', Mock(side_effect=IOError)) def test_restore_configuration_files(self): self.p.config.restore_configuration_files() def test_can_create_replica_without_replication_connection(self): self.p.config._config['create_replica_method'] = [] self.assertFalse(self.p.can_create_replica_without_replication_connection()) self.p.config._config['create_replica_method'] = ['wale', 'basebackup'] self.p.config._config['wale'] = {'command': 'foo', 'no_master': 1} self.assertTrue(self.p.can_create_replica_without_replication_connection()) def test_replica_method_can_work_without_replication_connection(self): self.assertFalse(self.p.replica_method_can_work_without_replication_connection('basebackup'))
self.assertFalse(self.p.replica_method_can_work_without_replication_connection('foobar')) self.p.config._config['foo'] = {'command': 'bar', 'no_master': 1} self.assertTrue(self.p.replica_method_can_work_without_replication_connection('foo')) self.p.config._config['foo'] = {'command': 'bar'} self.assertFalse(self.p.replica_method_can_work_without_replication_connection('foo')) @patch('time.sleep', Mock()) @patch.object(Postgresql, 'is_running', Mock(return_value=True)) @patch.object(MockCursor, 'fetchone') def test_reload_config(self, mock_fetchone): mock_fetchone.return_value = (1,) parameters = self._PARAMETERS.copy() parameters.pop('f.oo') parameters['wal_buffers'] = '512' config = {'pg_hba': [''], 'pg_ident': [''], 'use_unix_socket': True, 'authentication': {}, 'retry_timeout': 10, 'listen': '*', 'krbsrvname': 'postgres', 'parameters': parameters} self.p.reload_config(config) mock_fetchone.side_effect = Exception parameters['b.ar'] = 'bar' self.p.reload_config(config) parameters['autovacuum'] = 'on' self.p.reload_config(config) parameters['autovacuum'] = 'off' parameters.pop('search_path') config['listen'] = '*:5433' self.p.reload_config(config) parameters['unix_socket_directories'] = '.' self.p.reload_config(config) self.p.config.resolve_connection_addresses() @patch.object(Postgresql, '_version_file_exists', Mock(return_value=True)) def test_get_major_version(self): with patch.object(builtins, 'open', mock_open(read_data='9.4')): self.assertEqual(self.p.get_major_version(), 90400) with patch.object(builtins, 'open', Mock(side_effect=Exception)): self.assertEqual(self.p.get_major_version(), 0) def test_postmaster_start_time(self): with patch.object(MockCursor, "fetchone", Mock(return_value=('foo', True, '', '', '', '', False))): self.assertEqual(self.p.postmaster_start_time(), 'foo') t = Thread(target=self.p.postmaster_start_time) t.start() t.join() with patch.object(MockCursor, "execute", side_effect=psycopg2.Error): self.assertIsNone(self.p.postmaster_start_time()) def test_check_for_startup(self): with patch('subprocess.call', return_value=0): self.p._state = 'starting' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'running') with patch('subprocess.call', return_value=1): self.p._state = 'starting' self.assertTrue(self.p.check_for_startup()) self.assertEqual(self.p.state, 'starting') with patch('subprocess.call', return_value=2): self.p._state = 'starting' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'start failed') with patch('subprocess.call', return_value=0): self.p._state = 'running' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'running') with patch('subprocess.call', return_value=127): self.p._state = 'running' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'running') self.p._state = 'starting' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'running') def test_wait_for_startup(self): state = {'sleeps': 0, 'num_rejects': 0, 'final_return': 0} self.__thread_ident = current_thread().ident def increment_sleeps(*args): if current_thread().ident == self.__thread_ident: print("Sleep") state['sleeps'] += 1 def isready_return(*args): ret = 1 if state['sleeps'] < state['num_rejects'] else state['final_return'] print("Isready {0} {1}".format(ret, state)) return ret def time_in_state(*args): return state['sleeps'] with patch('subprocess.call', side_effect=isready_return): with patch('time.sleep', side_effect=increment_sleeps): 
self.p.time_in_state = Mock(side_effect=time_in_state) self.p._state = 'stopped' self.assertTrue(self.p.wait_for_startup()) self.assertEqual(state['sleeps'], 0) self.p._state = 'starting' state['num_rejects'] = 5 self.assertTrue(self.p.wait_for_startup()) self.assertEqual(state['sleeps'], 5) self.p._state = 'starting' state['sleeps'] = 0 state['final_return'] = 2 self.assertFalse(self.p.wait_for_startup()) self.p._state = 'starting' state['sleeps'] = 0 state['final_return'] = 0 self.assertFalse(self.p.wait_for_startup(timeout=2)) self.assertEqual(state['sleeps'], 3) with patch.object(Postgresql, 'check_startup_state_changed', Mock(return_value=False)): self.p.cancellable.cancel() self.p._state = 'starting' self.assertIsNone(self.p.wait_for_startup()) def test_pick_sync_standby(self): cluster = Cluster(True, None, self.leader, 0, [self.me, self.other, self.leadermem], None, SyncState(0, self.me.name, self.leadermem.name), None) with patch.object(Postgresql, "query", return_value=[ (self.leadermem.name, 'streaming', 'sync'), (self.me.name, 'streaming', 'async'), (self.other.name, 'streaming', 'async'), ]): self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, True)) with patch.object(Postgresql, "query", return_value=[ (self.me.name, 'streaming', 'async'), (self.leadermem.name, 'streaming', 'potential'), (self.other.name, 'streaming', 'async'), ]): self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, False)) with patch.object(Postgresql, "query", return_value=[ (self.me.name, 'streaming', 'async'), (self.other.name, 'streaming', 'async'), ]): self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.me.name, False)) with patch.object(Postgresql, "query", return_value=[ ('missing', 'streaming', 'sync'), (self.me.name, 'streaming', 'async'), (self.other.name, 'streaming', 'async'), ]): self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.me.name, False)) with patch.object(Postgresql, "query", return_value=[]): self.assertEqual(self.p.pick_synchronous_standby(cluster), (None, False)) def test_set_sync_standby(self): def value_in_conf(): with open(os.path.join(self.p.data_dir, 'postgresql.conf')) as f: for line in f: if line.startswith('synchronous_standby_names'): return line.strip() mock_reload = self.p.reload = Mock() self.p.config.set_synchronous_standby('n1') self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'") mock_reload.assert_called() mock_reload.reset_mock() self.p.config.set_synchronous_standby('n1') mock_reload.assert_not_called() self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'") self.p.config.set_synchronous_standby('n2') mock_reload.assert_called() self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n2'") mock_reload.reset_mock() self.p.config.set_synchronous_standby(None) mock_reload.assert_called() self.assertEqual(value_in_conf(), None) def test_get_server_parameters(self): config = {'synchronous_mode': True, 'parameters': {'wal_level': 'hot_standby'}, 'listen': '0'} self.p.config.get_server_parameters(config) config['synchronous_mode_strict'] = True self.p.config.get_server_parameters(config) self.p.config.set_synchronous_standby('foo') self.assertTrue(str(self.p.config.get_server_parameters(config)).startswith('{')) @patch('time.sleep', Mock()) def test__wait_for_connection_close(self): mock_postmaster = MockPostmaster() with patch.object(Postgresql, 'is_running', Mock(return_value=mock_postmaster)): mock_postmaster.is_running.side_effect = [True, False, 
False] mock_callback = Mock() self.p.stop(on_safepoint=mock_callback) mock_postmaster.is_running.side_effect = [True, False, False] with patch.object(MockCursor, "execute", Mock(side_effect=psycopg2.Error)): self.p.stop(on_safepoint=mock_callback) def test_terminate_starting_postmaster(self): mock_postmaster = MockPostmaster() self.p.terminate_starting_postmaster(mock_postmaster) mock_postmaster.signal_stop.assert_called() mock_postmaster.wait.assert_called() def test_read_postmaster_opts(self): m = mock_open(read_data='/usr/lib/postgres/9.6/bin/postgres "-D" "data/postgresql0" \ "--listen_addresses=127.0.0.1" "--port=5432" "--hot_standby=on" "--wal_level=hot_standby" \ "--wal_log_hints=on" "--max_wal_senders=5" "--max_replication_slots=5"\n') with patch.object(builtins, 'open', m): data = self.p.read_postmaster_opts() self.assertEqual(data['wal_level'], 'hot_standby') self.assertEqual(int(data['max_replication_slots']), 5) self.assertEqual(data.get('D'), None) m.side_effect = IOError data = self.p.read_postmaster_opts() self.assertEqual(data, dict()) @patch('psutil.Popen') def test_single_user_mode(self, subprocess_popen_mock): subprocess_popen_mock.return_value.wait.return_value = 0 self.assertEqual(self.p.single_user_mode('CHECKPOINT', {'archive_mode': 'on'}), 0) @patch('os.listdir', Mock(side_effect=[OSError, ['a', 'b']])) @patch('os.unlink', Mock(side_effect=OSError)) @patch('os.remove', Mock()) @patch('os.path.islink', Mock(side_effect=[True, False])) @patch('os.path.isfile', Mock(return_value=True)) def test_cleanup_archive_status(self): self.p.cleanup_archive_status() self.p.cleanup_archive_status() @patch('os.unlink', Mock()) @patch('os.listdir', Mock(return_value=[])) @patch('os.path.isfile', Mock(return_value=True)) @patch.object(Postgresql, 'read_postmaster_opts', Mock(return_value={})) @patch.object(Postgresql, 'single_user_mode', Mock(return_value=0)) def test_fix_cluster_state(self): self.assertTrue(self.p.fix_cluster_state()) def test_replica_cached_timeline(self): self.assertEqual(self.p.replica_cached_timeline(1), 2) def test_get_master_timeline(self): self.assertEqual(self.p.get_master_timeline(), 1) @patch.object(Postgresql, 'get_postgres_role_from_data_directory', Mock(return_value='replica')) def test__build_effective_configuration(self): with patch.object(Postgresql, 'controldata', Mock(return_value={'max_connections setting': '200', 'max_worker_processes setting': '20', 'max_prepared_xacts setting': '100', 'max_locks_per_xact setting': '100', 'max_wal_senders setting': 10})): self.p.cancellable.cancel() self.assertFalse(self.p.start()) self.assertTrue(self.p.pending_restart) @patch('os.path.exists', Mock(return_value=True)) @patch('os.path.isfile', Mock(return_value=False)) def test_pgpass_is_dir(self): self.assertRaises(PatroniException, self.setUp) patroni-1.6.4/tests/test_postmaster.py000066400000000000000000000136551361356115100201660ustar00rootroot00000000000000import multiprocessing import psutil import unittest from mock import Mock, patch, mock_open from patroni.postgresql.postmaster import PostmasterProcess from six.moves import builtins class MockProcess(object): def __init__(self, target, args): self.target = target self.args = args def start(self): self.target(*self.args) def join(self): pass class TestPostmasterProcess(unittest.TestCase): @patch('psutil.Process.__init__', Mock()) def test_init(self): proc = PostmasterProcess(-123) self.assertTrue(proc.is_single_user) @patch('psutil.Process.create_time') @patch('psutil.Process.__init__') 
@patch.object(PostmasterProcess, '_read_postmaster_pidfile') def test_from_pidfile(self, mock_read, mock_init, mock_create_time): mock_init.side_effect = psutil.NoSuchProcess(123) mock_read.return_value = {} self.assertIsNone(PostmasterProcess.from_pidfile('')) mock_read.return_value = {"pid": "foo"} self.assertIsNone(PostmasterProcess.from_pidfile('')) mock_read.return_value = {"pid": "123"} self.assertIsNone(PostmasterProcess.from_pidfile('')) mock_init.side_effect = None with patch.object(psutil.Process, 'pid', 123), \ patch.object(psutil.Process, 'ppid', return_value=124), \ patch('os.getpid', return_value=125) as mock_ospid, \ patch('os.getppid', return_value=126): self.assertIsNotNone(PostmasterProcess.from_pidfile('')) mock_create_time.return_value = 100000 mock_read.return_value = {"pid": "123", "start_time": "200000"} self.assertIsNone(PostmasterProcess.from_pidfile('')) mock_read.return_value = {"pid": "123", "start_time": "foobar"} self.assertIsNotNone(PostmasterProcess.from_pidfile('')) mock_ospid.return_value = 123 mock_read.return_value = {"pid": "123", "start_time": "100000"} self.assertIsNone(PostmasterProcess.from_pidfile('')) @patch('psutil.Process.__init__') def test_from_pid(self, mock_init): mock_init.side_effect = psutil.NoSuchProcess(123) self.assertEqual(PostmasterProcess.from_pid(123), None) mock_init.side_effect = None self.assertNotEqual(PostmasterProcess.from_pid(123), None) @patch('psutil.Process.__init__', Mock()) @patch('psutil.Process.send_signal') @patch('psutil.Process.pid', Mock(return_value=123)) @patch('os.name', 'posix') @patch('signal.SIGQUIT', 3, create=True) def test_signal_stop(self, mock_send_signal): proc = PostmasterProcess(-123) self.assertEqual(proc.signal_stop('immediate'), False) mock_send_signal.side_effect = [None, psutil.NoSuchProcess(123), psutil.AccessDenied()] proc = PostmasterProcess(123) self.assertEqual(proc.signal_stop('immediate'), None) self.assertEqual(proc.signal_stop('immediate'), True) self.assertEqual(proc.signal_stop('immediate'), False) @patch('psutil.Process.__init__', Mock()) @patch('patroni.postgresql.postmaster.os') @patch('subprocess.call', Mock(side_effect=[0, OSError, 1])) @patch('psutil.Process.pid', Mock(return_value=123)) @patch('psutil.Process.is_running', Mock(return_value=False)) def test_signal_stop_nt(self, mock_os): mock_os.configure_mock(name="nt") proc = PostmasterProcess(-123) self.assertEqual(proc.signal_stop('immediate'), False) proc = PostmasterProcess(123) self.assertEqual(proc.signal_stop('immediate'), None) self.assertEqual(proc.signal_stop('immediate'), False) self.assertEqual(proc.signal_stop('immediate'), True) @patch('psutil.Process.__init__', Mock()) @patch('psutil.wait_procs') def test_wait_for_user_backends_to_close(self, mock_wait): c1 = Mock() c1.cmdline = Mock(return_value=["postgres: startup process "]) c2 = Mock() c2.cmdline = Mock(return_value=["postgres: postgres postgres [local] idle"]) c3 = Mock() c3.cmdline = Mock(side_effect=psutil.NoSuchProcess(123)) with patch('psutil.Process.children', Mock(return_value=[c1, c2, c3])): proc = PostmasterProcess(123) self.assertIsNone(proc.wait_for_user_backends_to_close()) mock_wait.assert_called_with([c2]) with patch('psutil.Process.children', Mock(side_effect=psutil.NoSuchProcess(123))): proc = PostmasterProcess(123) self.assertIsNone(proc.wait_for_user_backends_to_close()) @patch('subprocess.Popen') @patch('os.setsid', Mock(), create=True) @patch('multiprocessing.Process', MockProcess) @patch('multiprocessing.get_context', 
Mock(return_value=multiprocessing), create=True) @patch.object(PostmasterProcess, 'from_pid') @patch.object(PostmasterProcess, '_from_pidfile') def test_start(self, mock_frompidfile, mock_frompid, mock_popen): mock_frompidfile.return_value._is_postmaster_process.return_value = False mock_frompid.return_value = "proc 123" mock_popen.return_value.pid = 123 self.assertEqual(PostmasterProcess.start('true', '/tmp', '/tmp/test.conf', []), "proc 123") mock_frompid.assert_called_with(123) mock_frompidfile.side_effect = psutil.NoSuchProcess(123) self.assertEqual(PostmasterProcess.start('true', '/tmp', '/tmp/test.conf', []), "proc 123") mock_popen.side_effect = Exception self.assertIsNone(PostmasterProcess.start('true', '/tmp', '/tmp/test.conf', [])) @patch('psutil.Process.__init__', Mock(side_effect=psutil.NoSuchProcess(123))) def test_read_postmaster_pidfile(self): with patch.object(builtins, 'open', Mock(side_effect=IOError)): self.assertIsNone(PostmasterProcess.from_pidfile('')) with patch.object(builtins, 'open', mock_open(read_data='123\n')): self.assertIsNone(PostmasterProcess.from_pidfile('')) patroni-1.6.4/tests/test_rewind.py000066400000000000000000000132211361356115100172420ustar00rootroot00000000000000from mock import Mock, PropertyMock, patch from patroni.postgresql import Postgresql from patroni.postgresql.cancellable import CancellableSubprocess from patroni.postgresql.rewind import Rewind from . import BaseTestPostgresql, MockCursor, psycopg2_connect @patch('subprocess.call', Mock(return_value=0)) @patch('psycopg2.connect', psycopg2_connect) class TestRewind(BaseTestPostgresql): def setUp(self): super(TestRewind, self).setUp() self.r = Rewind(self.p) def test_can_rewind(self): with patch.object(Postgresql, 'controldata', Mock(return_value={'wal_log_hints setting': 'on'})): self.assertTrue(self.r.can_rewind) with patch('subprocess.call', Mock(return_value=1)): self.assertFalse(self.r.can_rewind) with patch('subprocess.call', side_effect=OSError): self.assertFalse(self.r.can_rewind) self.p.config._config['use_pg_rewind'] = False self.assertFalse(self.r.can_rewind) @patch.object(CancellableSubprocess, 'call') def test_pg_rewind(self, mock_cancellable_subprocess_call): r = {'user': '', 'host': '', 'port': '', 'database': '', 'password': ''} mock_cancellable_subprocess_call.return_value = 0 self.assertTrue(self.r.pg_rewind(r)) mock_cancellable_subprocess_call.side_effect = OSError self.assertFalse(self.r.pg_rewind(r)) @patch.object(Rewind, 'can_rewind', PropertyMock(return_value=True)) def test__get_local_timeline_lsn(self): self.r.trigger_check_diverged_lsn() with patch.object(Postgresql, 'controldata', Mock(return_value={'Database cluster state': 'shut down in recovery', 'Minimum recovery ending location': '0/0', "Min recovery ending loc's timeline": '0'})): self.r.rewind_or_reinitialize_needed_and_possible(self.leader) with patch.object(Postgresql, 'is_running', Mock(return_value=True)): with patch.object(MockCursor, 'fetchone', Mock(side_effect=[(False, ), Exception])): self.r.rewind_or_reinitialize_needed_and_possible(self.leader) @patch.object(CancellableSubprocess, 'call', Mock(return_value=0)) @patch.object(Postgresql, 'checkpoint', side_effect=['', '1'],) @patch.object(Postgresql, 'stop', Mock(return_value=False)) @patch.object(Postgresql, 'start', Mock()) def test_execute(self, mock_checkpoint): self.r.execute(self.leader) with patch.object(Rewind, 'pg_rewind', Mock(return_value=False)): mock_checkpoint.side_effect = ['1', '', '', ''] self.r.execute(self.leader) 
self.r.execute(self.leader) with patch.object(Rewind, 'check_leader_is_not_in_recovery', Mock(return_value=False)): self.r.execute(self.leader) self.p.config._config['remove_data_directory_on_rewind_failure'] = False self.r.trigger_check_diverged_lsn() self.r.execute(self.leader) self.leader.member.data.update(version='1.5.7', checkpoint_after_promote=False, role='master') self.assertIsNone(self.r.execute(self.leader)) del self.leader.member.data['checkpoint_after_promote'] with patch.object(Rewind, 'check_leader_is_not_in_recovery', Mock(return_value=False)): self.assertIsNone(self.r.execute(self.leader)) with patch.object(Postgresql, 'is_running', Mock(return_value=True)): self.r.execute(self.leader) @patch.object(Postgresql, 'start', Mock()) @patch.object(Rewind, 'can_rewind', PropertyMock(return_value=True)) @patch.object(Rewind, '_get_local_timeline_lsn', Mock(return_value=(2, '40159C1'))) @patch.object(Rewind, 'check_leader_is_not_in_recovery') def test__check_timeline_and_lsn(self, mock_check_leader_is_not_in_recovery): mock_check_leader_is_not_in_recovery.return_value = False self.r.trigger_check_diverged_lsn() self.assertFalse(self.r.rewind_or_reinitialize_needed_and_possible(self.leader)) self.leader = self.leader.member self.assertFalse(self.r.rewind_or_reinitialize_needed_and_possible(self.leader)) mock_check_leader_is_not_in_recovery.return_value = True self.assertFalse(self.r.rewind_or_reinitialize_needed_and_possible(self.leader)) self.r.trigger_check_diverged_lsn() with patch('psycopg2.connect', Mock(side_effect=Exception)): self.assertFalse(self.r.rewind_or_reinitialize_needed_and_possible(self.leader)) self.r.trigger_check_diverged_lsn() with patch.object(MockCursor, 'fetchone', Mock(side_effect=[('', 2, '0/0'), ('', b'3\t0/40159C0\tn\n')])): self.assertFalse(self.r.rewind_or_reinitialize_needed_and_possible(self.leader)) self.r.trigger_check_diverged_lsn() with patch.object(MockCursor, 'fetchone', Mock(return_value=('', 1, '0/0'))): with patch.object(Rewind, '_get_local_timeline_lsn', Mock(return_value=(1, '0/0'))): self.assertFalse(self.r.rewind_or_reinitialize_needed_and_possible(self.leader)) self.r.trigger_check_diverged_lsn() self.assertTrue(self.r.rewind_or_reinitialize_needed_and_possible(self.leader)) @patch.object(MockCursor, 'fetchone', Mock(side_effect=[(True,), Exception])) def test_check_leader_is_not_in_recovery(self): self.r.check_leader_is_not_in_recovery() self.r.check_leader_is_not_in_recovery() @patch.object(Postgresql, 'controldata', Mock(return_value={"Latest checkpoint's TimeLineID": 1})) def test_check_for_checkpoint_after_promote(self): self.r.check_for_checkpoint_after_promote() patroni-1.6.4/tests/test_utils.py000066400000000000000000000053641361356115100171230ustar00rootroot00000000000000import unittest from mock import Mock, patch from patroni.exceptions import PatroniException from patroni.utils import Retry, RetryFailedError, polling_loop, validate_directory class TestUtils(unittest.TestCase): def test_polling_loop(self): self.assertEqual(list(polling_loop(0.001, interval=0.001)), [0]) @patch('os.path.exists', Mock(return_value=True)) @patch('os.path.isdir', Mock(return_value=True)) @patch('tempfile.mkstemp', Mock(return_value=("", ""))) @patch('os.remove', Mock(side_effect=Exception)) def test_validate_directory_writable(self): self.assertRaises(Exception, validate_directory, "/tmp") @patch('os.path.exists', Mock(return_value=True)) @patch('os.path.isdir', Mock(return_value=True)) @patch('tempfile.mkstemp', Mock(side_effect=OSError)) def 
test_validate_directory_not_writable(self): self.assertRaises(PatroniException, validate_directory, "/tmp") @patch('os.path.exists', Mock(return_value=False)) @patch('os.makedirs', Mock(side_effect=OSError)) def test_validate_directory_couldnt_create(self): self.assertRaises(PatroniException, validate_directory, "/tmp") @patch('os.path.exists', Mock(return_value=True)) @patch('os.path.isdir', Mock(return_value=False)) def test_validate_directory_is_not_a_directory(self): self.assertRaises(PatroniException, validate_directory, "/tmp") @patch('time.sleep', Mock()) class TestRetrySleeper(unittest.TestCase): @staticmethod def _fail(times=1): scope = dict(times=0) def inner(): if scope['times'] >= times: pass else: scope['times'] += 1 raise PatroniException('Failed!') return inner def test_reset(self): retry = Retry(delay=0, max_tries=2) retry(self._fail()) self.assertEqual(retry._attempts, 1) retry.reset() self.assertEqual(retry._attempts, 0) def test_too_many_tries(self): retry = Retry(delay=0) self.assertRaises(RetryFailedError, retry, self._fail(times=999)) self.assertEqual(retry._attempts, 1) def test_maximum_delay(self): retry = Retry(delay=10, max_tries=100) retry(self._fail(times=10)) self.assertTrue(retry._cur_delay < 4000, retry._cur_delay) # gevent's sleep function is picky about the type self.assertEqual(type(retry._cur_delay), float) def test_deadline(self): retry = Retry(deadline=0.0001) self.assertRaises(RetryFailedError, retry, self._fail(times=100)) def test_copy(self): def _sleep(t): pass retry = Retry(sleep_func=_sleep) rcopy = retry.copy() self.assertTrue(rcopy.sleep_func is _sleep) patroni-1.6.4/tests/test_wale_restore.py000066400000000000000000000152271361356115100204550ustar00rootroot00000000000000import psycopg2 import subprocess import unittest from mock import Mock, PropertyMock, patch, mock_open from patroni.scripts import wale_restore from patroni.scripts.wale_restore import WALERestore, main as _main, get_major_version from six.moves import builtins from threading import current_thread from . 
import MockConnect, psycopg2_connect wale_output_header = ( b'name\tlast_modified\t' b'expanded_size_bytes\t' b'wal_segment_backup_start\twal_segment_offset_backup_start\t' b'wal_segment_backup_stop\twal_segment_offset_backup_stop\n' ) wale_output_values = ( b'base_00000001000000000000007F_00000040\t2015-05-18T10:13:25.000Z\t' b'167772160\t' b'00000001000000000000007F\t00000040\t' b'00000001000000000000007F\t00000240\n' ) wale_output = wale_output_header + wale_output_values wale_restore.RETRY_SLEEP_INTERVAL = 0.001 # Speed up retries WALE_TEST_RETRIES = 2 @patch('os.access', Mock(return_value=True)) @patch('os.makedirs', Mock(return_value=True)) @patch('os.path.exists', Mock(return_value=True)) @patch('os.path.isdir', Mock(return_value=True)) @patch('psycopg2.connect', psycopg2_connect) @patch('subprocess.check_output', Mock(return_value=wale_output)) class TestWALERestore(unittest.TestCase): def setUp(self): self.wale_restore = WALERestore('batman', '/data', 'host=batman port=5432 user=batman', '/etc', 100, 100, 1, 0, WALE_TEST_RETRIES) def test_should_use_s3_to_create_replica(self): self.__thread_ident = current_thread().ident sleeps = [0] def mock_sleep(*args): if current_thread().ident == self.__thread_ident: sleeps[0] += 1 self.assertTrue(self.wale_restore.should_use_s3_to_create_replica()) with patch.object(MockConnect, 'server_version', PropertyMock(return_value=100000)): self.assertTrue(self.wale_restore.should_use_s3_to_create_replica()) with patch('subprocess.check_output', Mock(return_value=wale_output.replace(b'167772160', b'1'))): self.assertFalse(self.wale_restore.should_use_s3_to_create_replica()) with patch('psycopg2.connect', Mock(side_effect=psycopg2.Error("foo"))): save_no_master = self.wale_restore.no_master save_master_connection = self.wale_restore.master_connection self.assertFalse(self.wale_restore.should_use_s3_to_create_replica()) with patch('time.sleep', mock_sleep): self.wale_restore.no_master = 1 self.assertTrue(self.wale_restore.should_use_s3_to_create_replica()) # verify retries self.assertEqual(sleeps[0], WALE_TEST_RETRIES) self.wale_restore.master_connection = '' self.assertTrue(self.wale_restore.should_use_s3_to_create_replica()) self.wale_restore.no_master = save_no_master self.wale_restore.master_connection = save_master_connection with patch('subprocess.check_output', Mock(side_effect=subprocess.CalledProcessError(1, "cmd", "foo"))): self.assertFalse(self.wale_restore.should_use_s3_to_create_replica()) with patch('subprocess.check_output', Mock(return_value=wale_output_header)): self.assertFalse(self.wale_restore.should_use_s3_to_create_replica()) with patch('subprocess.check_output', Mock(return_value=wale_output + wale_output_values)): self.assertFalse(self.wale_restore.should_use_s3_to_create_replica()) with patch('subprocess.check_output', Mock(return_value=wale_output.replace(b'expanded_size_bytes', b'expanded_size_foo'))): self.assertFalse(self.wale_restore.should_use_s3_to_create_replica()) def test_create_replica_with_s3(self): with patch('subprocess.call', Mock(return_value=0)): self.assertEqual(self.wale_restore.create_replica_with_s3(), 0) with patch.object(self.wale_restore, 'fix_subdirectory_path_if_broken', Mock(return_value=False)): self.assertEqual(self.wale_restore.create_replica_with_s3(), 2) with patch('subprocess.call', Mock(side_effect=Exception("foo"))): self.assertEqual(self.wale_restore.create_replica_with_s3(), 1) def test_run(self): self.wale_restore.init_error = True self.assertEqual(self.wale_restore.run(), 2) # this would 
do 2 retries, 1 sec each self.wale_restore.init_error = False with patch.object(self.wale_restore, 'should_use_s3_to_create_replica', Mock(return_value=True)): with patch.object(self.wale_restore, 'create_replica_with_s3', Mock(return_value=0)): self.assertEqual(self.wale_restore.run(), 0) with patch.object(self.wale_restore, 'should_use_s3_to_create_replica', Mock(return_value=False)): self.assertEqual(self.wale_restore.run(), 2) with patch.object(self.wale_restore, 'should_use_s3_to_create_replica', Mock(return_value=None)): self.assertEqual(self.wale_restore.run(), 1) with patch.object(self.wale_restore, 'should_use_s3_to_create_replica', Mock(side_effect=Exception)): self.assertEqual(self.wale_restore.run(), 2) @patch('sys.exit', Mock()) def test_main(self): self.__thread_ident = current_thread().ident sleeps = [0] def mock_sleep(*args): if current_thread().ident == self.__thread_ident: sleeps[0] += 1 with patch.object(WALERestore, 'run', Mock(return_value=0)): self.assertEqual(_main(), 0) with patch.object(WALERestore, 'run', Mock(return_value=1)), \ patch('time.sleep', mock_sleep): self.assertEqual(_main(), 1) self.assertTrue(sleeps[0], WALE_TEST_RETRIES) @patch('os.path.isfile', Mock(return_value=True)) def test_get_major_version(self): with patch.object(builtins, 'open', mock_open(read_data='9.4')): self.assertEqual(get_major_version("data"), 9.4) with patch.object(builtins, 'open', side_effect=OSError): self.assertEqual(get_major_version("data"), 0.0) @patch('os.path.islink', Mock(return_value=True)) @patch('os.readlink', Mock(return_value="foo")) @patch('os.remove', Mock()) @patch('os.mkdir', Mock()) def test_fix_subdirectory_path_if_broken(self): with patch('os.path.exists', Mock(return_value=False)): # overriding the class-wide mock self.assertTrue(self.wale_restore.fix_subdirectory_path_if_broken("data1")) for fn in ('os.remove', 'os.mkdir'): with patch(fn, side_effect=OSError): self.assertFalse(self.wale_restore.fix_subdirectory_path_if_broken("data3")) patroni-1.6.4/tests/test_watchdog.py000066400000000000000000000206361361356115100175620ustar00rootroot00000000000000import ctypes import patroni.watchdog.linux as linuxwd import sys import unittest import os from mock import patch, Mock, PropertyMock from patroni.watchdog import Watchdog, WatchdogError from patroni.watchdog.base import NullWatchdog from patroni.watchdog.linux import LinuxWatchdogDevice class MockDevice(object): def __init__(self, fd, filename, flag): self.fd = fd self.filename = filename self.flag = flag self.timeout = 60 self.open = True self.writes = [] mock_devices = [None] def mock_open(filename, flag): fd = len(mock_devices) mock_devices.append(MockDevice(fd, filename, flag)) return fd def mock_ioctl(fd, op, arg=None, mutate_flag=False): assert 0 < fd < len(mock_devices) dev = mock_devices[fd] sys.stderr.write("Ioctl %d %d %r\n" % (fd, op, arg)) if op == linuxwd.WDIOC_GETSUPPORT: sys.stderr.write("Get support\n") assert(mutate_flag is True) arg.options = sum(map(linuxwd.WDIOF.get, ['SETTIMEOUT', 'KEEPALIVEPING'])) arg.identity = (ctypes.c_ubyte*32)(*map(ord, 'Mock Watchdog')) elif op == linuxwd.WDIOC_GETTIMEOUT: arg.value = dev.timeout elif op == linuxwd.WDIOC_SETTIMEOUT: sys.stderr.write("Set timeout called with %s\n" % arg.value) assert 0 < arg.value < 65535 dev.timeout = arg.value - 1 else: raise Exception("Unknown op %d" % op) return 0 def mock_write(fd, string): assert 0 < fd < len(mock_devices) assert len(string) == 1 assert mock_devices[fd].open mock_devices[fd].writes.append(string) def
mock_close(fd): assert 0 < fd < len(mock_devices) assert mock_devices[fd].open mock_devices[fd].open = False @unittest.skipIf(os.name == 'nt', "Windows not supported") @patch('os.open', mock_open) @patch('os.write', mock_write) @patch('os.close', mock_close) @patch('fcntl.ioctl', mock_ioctl) class TestWatchdog(unittest.TestCase): def setUp(self): mock_devices[:] = [None] @patch('platform.system', Mock(return_value='Linux')) @patch.object(LinuxWatchdogDevice, 'can_be_disabled', PropertyMock(return_value=True)) def test_unsafe_timeout_disable_watchdog_and_exit(self): watchdog = Watchdog({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'required', 'safety_margin': -1}}) self.assertEqual(watchdog.activate(), False) self.assertEqual(watchdog.is_running, False) @patch('platform.system', Mock(return_value='Linux')) @patch.object(LinuxWatchdogDevice, 'get_timeout', Mock(return_value=16)) def test_timeout_does_not_ensure_safe_termination(self): Watchdog({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'auto', 'safety_margin': -1}}).activate() self.assertEqual(len(mock_devices), 2) @patch('platform.system', Mock(return_value='Linux')) @patch.object(Watchdog, 'is_running', PropertyMock(return_value=False)) def test_watchdog_not_activated(self): self.assertFalse(Watchdog({'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'required'}}).activate()) @patch('platform.system', Mock(return_value='Linux')) @patch.object(LinuxWatchdogDevice, 'is_running', PropertyMock(return_value=False)) def test_watchdog_activate(self): with patch.object(LinuxWatchdogDevice, 'open', Mock(side_effect=WatchdogError(''))): self.assertTrue(Watchdog({'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'auto'}}).activate()) self.assertFalse(Watchdog({'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'required'}}).activate()) @patch('platform.system', Mock(return_value='Linux')) def test_basic_operation(self): watchdog = Watchdog({'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'required'}}) watchdog.activate() self.assertEqual(len(mock_devices), 2) device = mock_devices[-1] self.assertTrue(device.open) self.assertEqual(device.timeout, 24) watchdog.keepalive() self.assertEqual(len(device.writes), 1) watchdog.disable() self.assertFalse(device.open) self.assertEqual(device.writes[-1], b'V') def test_invalid_timings(self): watchdog = Watchdog({'ttl': 30, 'loop_wait': 20, 'watchdog': {'mode': 'automatic', 'safety_margin': -1}}) watchdog.activate() self.assertEqual(len(mock_devices), 1) self.assertFalse(watchdog.is_running) def test_parse_mode(self): with patch('patroni.watchdog.base.logger.warning', new_callable=Mock()) as warning_mock: watchdog = Watchdog({'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'bad'}}) self.assertEqual(watchdog.config.mode, 'off') warning_mock.assert_called_once() @patch('platform.system', Mock(return_value='Unknown')) def test_unsupported_platform(self): self.assertRaises(SystemExit, Watchdog, {'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'required', 'driver': 'bad'}}) def test_exceptions(self): wd = Watchdog({'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'bad'}}) wd.impl.close = wd.impl.keepalive = Mock(side_effect=WatchdogError('')) self.assertTrue(wd.activate()) self.assertIsNone(wd.keepalive()) self.assertIsNone(wd.disable()) @patch('platform.system', Mock(return_value='Linux')) def test_config_reload(self): watchdog = Watchdog({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'required'}}) self.assertTrue(watchdog.activate()) self.assertTrue(watchdog.is_running) watchdog.reload_config({'ttl': 
30, 'loop_wait': 15, 'watchdog': {'mode': 'off'}}) self.assertFalse(watchdog.is_running) watchdog.reload_config({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'required'}}) self.assertFalse(watchdog.is_running) watchdog.keepalive() self.assertTrue(watchdog.is_running) watchdog.disable() watchdog.reload_config({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'required', 'driver': 'unknown'}}) self.assertFalse(watchdog.is_healthy) self.assertFalse(watchdog.activate()) watchdog.reload_config({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'required'}}) self.assertFalse(watchdog.is_running) watchdog.keepalive() self.assertTrue(watchdog.is_running) watchdog.reload_config({'ttl': 60, 'loop_wait': 15, 'watchdog': {'mode': 'required'}}) watchdog.keepalive() class TestNullWatchdog(unittest.TestCase): def test_basics(self): watchdog = NullWatchdog() self.assertTrue(watchdog.can_be_disabled) self.assertRaises(WatchdogError, watchdog.set_timeout, 1) self.assertEqual(watchdog.describe(), 'NullWatchdog') self.assertIsInstance(NullWatchdog.from_config({}), NullWatchdog) @unittest.skipIf(os.name == 'nt', "Windows not supported") class TestLinuxWatchdogDevice(unittest.TestCase): def setUp(self): self.impl = LinuxWatchdogDevice.from_config({}) @patch('os.open', Mock(return_value=3)) @patch('os.write', Mock(side_effect=OSError)) @patch('fcntl.ioctl', Mock(return_value=0)) def test_basics(self): self.impl.open() try: if self.impl.get_support().has_foo: self.fail() except Exception as e: self.assertTrue(isinstance(e, AttributeError)) self.assertRaises(WatchdogError, self.impl.close) self.assertRaises(WatchdogError, self.impl.keepalive) self.assertRaises(WatchdogError, self.impl.set_timeout, -1) @patch('os.open', Mock(return_value=3)) @patch('fcntl.ioctl', Mock(side_effect=OSError)) def test__ioctl(self): self.assertRaises(WatchdogError, self.impl.get_support) self.impl.open() self.assertRaises(WatchdogError, self.impl.get_support) def test_is_healthy(self): self.assertFalse(self.impl.is_healthy) @patch('os.open', Mock(return_value=3)) @patch('fcntl.ioctl', Mock(side_effect=OSError)) def test_error_handling(self): self.impl.open() self.assertRaises(WatchdogError, self.impl.get_timeout) self.assertRaises(WatchdogError, self.impl.set_timeout, 10) # We still try to output a reasonable string even if getting info errors self.assertEqual(self.impl.describe(), "Linux watchdog device") @patch('os.open', Mock(side_effect=OSError)) def test_open(self): self.assertRaises(WatchdogError, self.impl.open) patroni-1.6.4/tests/test_zookeeper.py000066400000000000000000000213571361356115100177660ustar00rootroot00000000000000import select import six import unittest from kazoo.client import KazooState from kazoo.exceptions import NoNodeError, NodeExistsError from kazoo.handlers.threading import SequentialThreadingHandler from kazoo.protocol.states import ZnodeStat from mock import Mock, patch from patroni.dcs.zookeeper import Leader, PatroniSequentialThreadingHandler, ZooKeeper, ZooKeeperError class MockKazooClient(Mock): leader = False exists = True def __init__(self, *args, **kwargs): super(MockKazooClient, self).__init__() self._session_timeout = 30 @property def client_id(self): return (-1, '') @staticmethod def retry(func, *args, **kwargs): return func(*args, **kwargs) def get(self, path, watch=None): if not isinstance(path, six.string_types): raise TypeError("Invalid type for 'path' (string expected)") if path == '/no_node': raise NoNodeError elif '/members/' in path: return (
b'postgres://repuser:rep-pass@localhost:5434/postgres?application_name=http://127.0.0.1:8009/patroni', ZnodeStat(0, 0, 0, 0, 0, 0, 0, 0 if self.exists else -1, 0, 0, 0) ) elif path.endswith('/leader'): if self.leader: return (b'foo', ZnodeStat(0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0)) return (b'foo', ZnodeStat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) elif path.endswith('/initialize'): return (b'foo', ZnodeStat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) return (b'', ZnodeStat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) @staticmethod def get_children(path, watch=None, include_data=False): if not isinstance(path, six.string_types): raise TypeError("Invalid type for 'path' (string expected)") if path.startswith('/no_node'): raise NoNodeError elif path in ['/service/bla/', '/service/test/']: return ['initialize', 'leader', 'members', 'optime', 'failover', 'sync'] return ['foo', 'bar', 'buzz'] def create(self, path, value=b"", acl=None, ephemeral=False, sequence=False, makepath=False): if not isinstance(path, six.string_types): raise TypeError("Invalid type for 'path' (string expected)") if not isinstance(value, (six.binary_type,)): raise TypeError("Invalid type for 'value' (must be a byte string)") if b'Exception' in value: raise Exception if path.endswith('/initialize') or path == '/service/test/optime/leader': raise Exception elif b'retry' in value or (b'exists' in value and self.exists): raise NodeExistsError def create_async(self, path, value=b"", acl=None, ephemeral=False, sequence=False, makepath=False): return self.create(path, value, acl, ephemeral, sequence, makepath) or Mock() @staticmethod def set(path, value, version=-1): if not isinstance(path, six.string_types): raise TypeError("Invalid type for 'path' (string expected)") if not isinstance(value, (six.binary_type,)): raise TypeError("Invalid type for 'value' (must be a byte string)") if path == '/service/bla/optime/leader': raise Exception if path == '/service/test/members/bar' and b'retry' in value: return if path in ('/service/test/failover', '/service/test/config', '/service/test/sync'): if b'Exception' in value: raise Exception elif value == b'ok': return raise NoNodeError def set_async(self, path, value, version=-1): return self.set(path, value, version) or Mock() def delete(self, path, version=-1, recursive=False): if not isinstance(path, six.string_types): raise TypeError("Invalid type for 'path' (string expected)") self.exists = False if path == '/service/test/leader': self.leader = True raise Exception elif path == '/service/test/members/buzz': raise Exception elif path.endswith('/') or path.endswith('/initialize') or path == '/service/test/members/bar': raise NoNodeError def delete_async(self, path, version=-1, recursive=False): return self.delete(path, version, recursive) or Mock() class TestPatroniSequentialThreadingHandler(unittest.TestCase): def setUp(self): self.handler = PatroniSequentialThreadingHandler(10) @patch.object(SequentialThreadingHandler, 'create_connection', Mock()) def test_create_connection(self): self.assertIsNotNone(self.handler.create_connection(())) self.assertIsNotNone(self.handler.create_connection((), 40)) self.assertIsNotNone(self.handler.create_connection(timeout=40)) @patch.object(SequentialThreadingHandler, 'select', Mock(side_effect=ValueError)) def test_select(self): self.assertRaises(select.error, self.handler.select) class TestZooKeeper(unittest.TestCase): @patch('patroni.dcs.zookeeper.KazooClient', MockKazooClient) def setUp(self): self.zk = ZooKeeper({'hosts': ['localhost:2181'], 'scope': 'test', 'name': 'foo', 
'ttl': 30, 'retry_timeout': 10, 'loop_wait': 10}) def test_session_listener(self): self.zk.session_listener(KazooState.SUSPENDED) def test_reload_config(self): self.zk.reload_config({'ttl': 20, 'retry_timeout': 10, 'loop_wait': 10}) self.zk.reload_config({'ttl': 20, 'retry_timeout': 10, 'loop_wait': 5}) def test_get_node(self): self.assertIsNone(self.zk.get_node('/no_node')) def test_get_children(self): self.assertListEqual(self.zk.get_children('/no_node'), []) def test__inner_load_cluster(self): self.zk._base_path = self.zk._base_path.replace('test', 'bla') self.zk._inner_load_cluster() self.zk._base_path = '/no_node' self.zk._inner_load_cluster() def test_get_cluster(self): self.assertRaises(ZooKeeperError, self.zk.get_cluster) cluster = self.zk.get_cluster() self.assertIsInstance(cluster.leader, Leader) self.zk.touch_member({'foo': 'foo'}) def test_delete_leader(self): self.assertTrue(self.zk.delete_leader()) def test_set_failover_value(self): self.zk.set_failover_value('') self.zk.set_failover_value('ok') self.zk.set_failover_value('Exception') def test_set_config_value(self): self.zk.set_config_value('', 1) self.zk.set_config_value('ok') self.zk.set_config_value('Exception') def test_initialize(self): self.assertFalse(self.zk.initialize()) def test_cancel_initialization(self): self.zk.cancel_initialization() def test_touch_member(self): self.zk._name = 'buzz' self.zk.get_cluster() self.zk.touch_member({'new': 'new'}) self.zk._name = 'bar' self.zk.touch_member({'new': 'new'}) self.zk._name = 'na' self.zk._client.exists = 1 self.zk.touch_member({'Exception': 'Exception'}) self.zk._name = 'bar' self.zk.touch_member({'retry': 'retry'}) self.zk._fetch_cluster = True self.zk.get_cluster() self.zk.touch_member({'conn_url': 'postgres://repuser:rep-pass@localhost:5434/postgres', 'api_url': 'http://127.0.0.1:8009/patroni'}) def test_take_leader(self): self.zk.take_leader() with patch.object(MockKazooClient, 'create', Mock(side_effect=Exception)): self.zk.take_leader() def test_update_leader(self): self.assertTrue(self.zk.update_leader(None)) def test_write_leader_optime(self): self.zk.last_leader_operation = '0' self.zk.write_leader_optime('1') with patch.object(MockKazooClient, 'create_async', Mock()): self.zk.write_leader_optime('1') with patch.object(MockKazooClient, 'set_async', Mock()): self.zk.write_leader_optime('2') self.zk._base_path = self.zk._base_path.replace('test', 'bla') self.zk.write_leader_optime('3') def test_delete_cluster(self): self.assertTrue(self.zk.delete_cluster()) def test_watch(self): self.zk.watch(None, 0) self.zk.event.isSet = Mock(return_value=True) self.zk.watch(None, 0) def test__kazoo_connect(self): self.zk._client._retry.deadline = 1 self.zk._orig_kazoo_connect = Mock(return_value=(0, 0)) self.zk._kazoo_connect(None, None) def test_sync_state(self): self.zk.set_sync_state_value('') self.zk.set_sync_state_value('ok') self.zk.set_sync_state_value('Exception') self.zk.delete_sync_state() def test_set_history_value(self): self.zk.set_history_value('{}') patroni-1.6.4/tox.ini000066400000000000000000000000351361356115100145110ustar00rootroot00000000000000[flake8] max-line-length=120